3 changed files with 2 additions and 1499 deletions
@@ -1,111 +0,0 @@
""" A bitcoind proxy that allows instrumentation and canned responses
"""
from flask import Flask, request
from bitcoin.rpc import JSONRPCError
from bitcoin.rpc import RawProxy as BitcoinProxy
from cheroot.wsgi import Server
from cheroot.wsgi import PathInfoDispatcher

import decimal
import flask
import json
import logging
import threading


class DecimalEncoder(json.JSONEncoder):
    """By default json.dumps does not handle Decimals correctly, so we override its handling
    """
    def default(self, o):
        if isinstance(o, decimal.Decimal):
            return "{:.8f}".format(float(o))
        return super(DecimalEncoder, self).default(o)

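# Editor's illustrative sketch (not part of the original file): the encoder
# keeps json.dumps from raising TypeError on Decimal amounts and renders them
# with 8 decimal places, matching bitcoind's satoshi precision:
#
#   >>> json.dumps({"amount": decimal.Decimal("0.1")}, cls=DecimalEncoder)
#   '{"amount": "0.10000000"}'
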
class BitcoinRpcProxy(object):
    def __init__(self, bitcoind, rpcport=0):
        self.app = Flask("BitcoindProxy")
        self.app.add_url_rule("/", "API entrypoint", self.proxy, methods=['POST'])
        self.rpcport = rpcport
        self.mocks = {}
        self.mock_counts = {}
        self.bitcoind = bitcoind
        self.request_count = 0

    def _handle_request(self, r):
        brpc = BitcoinProxy(btc_conf_file=self.bitcoind.conf_file)
        method = r['method']

        # If we have set a mock for this method reply with that instead of
        # forwarding the request.
        if method in self.mocks and type(self.mocks[method]) == dict:
            ret = {}
            ret['id'] = r['id']
            ret['error'] = None
            ret['result'] = self.mocks[method]
            self.mock_counts[method] += 1
            return ret
        elif method in self.mocks and callable(self.mocks[method]):
            self.mock_counts[method] += 1
            return self.mocks[method](r)

        try:
            reply = {
                "result": brpc._call(r['method'], *r['params']),
                "error": None,
                "id": r['id']
            }
        except JSONRPCError as e:
            reply = {
                "error": e.error,
                "code": -32603,
                "id": r['id']
            }
        self.request_count += 1
        return reply

    def proxy(self):
        r = json.loads(request.data.decode('ASCII'))

        if isinstance(r, list):
            reply = [self._handle_request(subreq) for subreq in r]
        else:
            reply = self._handle_request(r)

        response = flask.Response(json.dumps(reply, cls=DecimalEncoder))
        response.headers['Content-Type'] = 'application/json'
        return response

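    # Editor's note: a JSON-RPC batch request (a JSON array of request
    # objects) is answered element-by-element by proxy() above, so mocks
    # installed via mock_rpc() also apply inside batches. For example, the
    # request body
    #   [{"id": 1, "method": "getblockcount", "params": []},
    #    {"id": 2, "method": "getblockchaininfo", "params": []}]
    # produces a two-element JSON array as the reply.
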
    def start(self):
        d = PathInfoDispatcher({'/': self.app})
        self.server = Server(('0.0.0.0', self.rpcport), d)
        self.proxy_thread = threading.Thread(target=self.server.start)
        self.proxy_thread.daemon = True
        self.proxy_thread.start()

        # Now that bitcoind is running on the real rpcport, let's tell all
        # future callers to talk to the proxyport. We use the bind_addr as a
        # signal that the port is bound and accepting connections.
        while self.server.bind_addr[1] == 0:
            pass
        self.rpcport = self.server.bind_addr[1]
        logging.debug("BitcoinRpcProxy proxying incoming port {} to {}".format(self.rpcport, self.bitcoind.rpcport))

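    # Editor's note: passing rpcport=0 lets the OS pick any free port;
    # cheroot fills in server.bind_addr with the actual address once the
    # socket is bound, which is what the busy-wait above polls for before
    # publishing the real port in self.rpcport.
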
    def stop(self):
        self.server.stop()
        self.proxy_thread.join()
        logging.debug("BitcoinRpcProxy shut down after processing {} requests".format(self.request_count))

    def mock_rpc(self, method, response=None):
        """Mock the response to a future RPC call of @method

        The response can either be a dict, which is returned as the call's
        'result' field, or a function that returns a full JSON-RPC response.
        If the response is None the mock is removed and future calls will be
        passed through to bitcoind again.

        """
        if response is not None:
            self.mocks[method] = response
            self.mock_counts[method] = 0
        elif method in self.mocks:
            del self.mocks[method]
@@ -1,322 +1,2 @@
from concurrent import futures
from db import SqliteDbProvider, PostgresDbProvider
from utils import NodeFactory, BitcoinD, ElementsD, env
from utils import DEVELOPER, TEST_NETWORK  # noqa: F401,F403
from pyln.testing.fixtures import directory, test_base_dir, test_name, chainparams, node_factory, bitcoind, teardown_checks, db_provider, executor  # noqa: F401,F403

import logging
import os
import pytest
import re
import shutil
import tempfile

# A dict in which we count how often a particular test has run so far. Used to
# give each attempt its own numbered directory, and avoid clashes.
__attempts = {}


@pytest.fixture(scope="session")
def test_base_dir():
    d = env("TEST_DIR", "/tmp")

    directory = tempfile.mkdtemp(prefix='ltests-', dir=d)
    print("Running tests in {}".format(directory))

    yield directory

    if os.listdir(directory) == []:
        shutil.rmtree(directory)

@pytest.fixture
def directory(request, test_base_dir, test_name):
    """Return a per-test specific directory.

    This makes a unique test-directory even if a test is rerun multiple times.

    """
    global __attempts
    # Auto set value if it isn't in the dict yet
    __attempts[test_name] = __attempts.get(test_name, 0) + 1
    directory = os.path.join(test_base_dir, "{}_{}".format(test_name, __attempts[test_name]))
    request.node.has_errors = False

    yield directory

    # This uses the status set in conftest.pytest_runtest_makereport to
    # determine whether we succeeded or failed. Outcome can be None if the
    # failure occurs during the setup phase, hence the use of getattr instead
    # of accessing it directly.
    rep_call = getattr(request.node, 'rep_call', None)
    outcome = rep_call.outcome if rep_call else None
    failed = not outcome or request.node.has_errors or outcome != 'passed'

    if not failed:
        shutil.rmtree(directory)
    else:
        logging.debug("Test execution failed, leaving the test directory {} intact.".format(directory))

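# Editor's note: with the counter above, the third run of a test named
# `test_foo` gets <test_base_dir>/test_foo_3, so retries of flaky tests
# never clobber the artifacts of earlier attempts.
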
@pytest.fixture
def test_name(request):
    yield request.function.__name__


network_daemons = {
    'regtest': BitcoinD,
    'liquid-regtest': ElementsD,
}


@pytest.fixture
def bitcoind(directory, teardown_checks):
    chaind = network_daemons[env('TEST_NETWORK', 'regtest')]
    bitcoind = chaind(bitcoin_dir=directory)

    try:
        bitcoind.start()
    except Exception:
        bitcoind.stop()
        raise

    info = bitcoind.rpc.getnetworkinfo()

    if info['version'] < 160000:
        bitcoind.rpc.stop()
        raise ValueError("bitcoind is too old. At least version 160000 (v0.16.0)"
                         " is needed, current version is {}".format(info['version']))

    info = bitcoind.rpc.getblockchaininfo()
    # Make sure we have some spendable funds
    if info['blocks'] < 101:
        bitcoind.generate_block(101 - info['blocks'])
    elif bitcoind.rpc.getwalletinfo()['balance'] < 1:
        logging.debug("Insufficient balance, generating 1 block")
        bitcoind.generate_block(1)

    yield bitcoind

    try:
        bitcoind.stop()
    except Exception:
        bitcoind.proc.kill()
    bitcoind.proc.wait()

class TeardownErrors(object):
    def __init__(self):
        self.errors = []
        self.node_errors = []

    def add_error(self, msg):
        self.errors.append(msg)

    def add_node_error(self, node, msg):
        self.node_errors.append((node.daemon.prefix, msg))

    def __str__(self):
        node_errors = [" - {}: {}".format(*e) for e in self.node_errors]
        errors = [" - {}".format(e) for e in self.errors]

        errors = ["\nNode errors:"] + node_errors + ["Global errors:"] + errors
        return "\n".join(errors)

    def has_errors(self):
        return len(self.errors) > 0 or len(self.node_errors) > 0


@pytest.fixture
def teardown_checks(request):
    """A simple fixture to collect errors during teardown.

    We need to collect the errors and raise them as the very last step in the
    fixture tree, otherwise some fixtures may not be cleaned up
    correctly. Require this fixture in all other fixtures that need to either
    clean up before reporting an error or want to add an error that is to be
    reported.

    """
    errors = TeardownErrors()
    yield errors

    if errors.has_errors():
        # Format a nice list of everything that went wrong and raise an exception
        request.node.has_errors = True
        raise ValueError(str(errors))

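# Editor's illustrative sketch (hypothetical fixture, not in this file):
# fixtures that depend on teardown_checks can record problems instead of
# raising mid-teardown, so the fixtures further up the tree still run:
#
#   @pytest.fixture
#   def my_service(teardown_checks):
#       svc = start_service()  # hypothetical helper
#       yield svc
#       if not svc.stop():
#           teardown_checks.add_error("my_service did not stop cleanly")
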
@pytest.fixture
def node_factory(request, directory, test_name, bitcoind, executor, db_provider, teardown_checks):
    nf = NodeFactory(
        test_name,
        bitcoind,
        executor,
        directory=directory,
        db_provider=db_provider,
    )

    yield nf
    ok, errs = nf.killall([not n.may_fail for n in nf.nodes])

    for e in errs:
        teardown_checks.add_error(e)

    def map_node_error(nodes, f, msg):
        for n in nodes:
            if n and f(n):
                teardown_checks.add_node_error(n, msg)

    map_node_error(nf.nodes, printValgrindErrors, "reported valgrind errors")
    map_node_error(nf.nodes, printCrashLog, "had crash.log files")
    map_node_error(nf.nodes, lambda n: not n.allow_broken_log and n.daemon.is_in_log(r'\*\*BROKEN\*\*'), "had BROKEN messages")
    map_node_error(nf.nodes, checkReconnect, "had unexpected reconnections")
    map_node_error(nf.nodes, checkBadGossip, "had bad gossip messages")
    map_node_error(nf.nodes, lambda n: n.daemon.is_in_log('Bad reestablish'), "had bad reestablish")
    map_node_error(nf.nodes, lambda n: n.daemon.is_in_log('bad hsm request'), "had bad hsm requests")
    map_node_error(nf.nodes, lambda n: n.daemon.is_in_log(r'Accessing a null column'), "Accessing a null column")
    map_node_error(nf.nodes, checkMemleak, "had memleak messages")

    if not ok:
        teardown_checks.add_error("At least one lightning exited with unexpected non-zero return code")

def getValgrindErrors(node):
    for error_file in os.listdir(node.daemon.lightning_dir):
        if not re.fullmatch(r"valgrind-errors\.\d+", error_file):
            continue
        with open(os.path.join(node.daemon.lightning_dir, error_file), 'r') as f:
            errors = f.read().strip()
            if errors:
                return errors, error_file
    return None, None


def printValgrindErrors(node):
    errors, fname = getValgrindErrors(node)
    if errors:
        print("-" * 31, "Valgrind errors", "-" * 32)
        print("Valgrind error file:", fname)
        print(errors)
        print("-" * 80)
    return 1 if errors else 0


def getCrashLog(node):
    if node.may_fail:
        return None, None
    try:
        crashlog = os.path.join(node.daemon.lightning_dir, 'crash.log')
        with open(crashlog, 'r') as f:
            return f.readlines(), crashlog
    except Exception:
        return None, None


def printCrashLog(node):
    errors, fname = getCrashLog(node)
    if errors:
        print("-" * 10, "{} (last 50 lines)".format(fname), "-" * 10)
        print("".join(errors[-50:]))
        print("-" * 80)
    return 1 if errors else 0


def checkReconnect(node):
    # Without DEVELOPER, we can't suppress reconnection.
    if node.may_reconnect or not DEVELOPER:
        return 0
    if node.daemon.is_in_log('Peer has reconnected'):
        return 1
    return 0


def checkBadGossip(node):
    if node.allow_bad_gossip:
        return 0
    # We can get bad gossip order from inside error msgs.
    if node.daemon.is_in_log('Bad gossip order from (?!error)'):
        # This can happen if a node sees a node_announce after a channel
        # is deleted, however.
        if node.daemon.is_in_log('Deleting channel'):
            return 0
        return 1

    # Other 'Bad' messages shouldn't happen.
    if node.daemon.is_in_log(r'gossipd.*Bad (?!gossip order from error)'):
        return 1
    return 0


def checkBroken(node):
    if node.allow_broken_log:
        return 0
    if node.daemon.is_in_log(r'\*\*BROKEN\*\*'):
        return 1
    return 0


def checkBadReestablish(node):
    if node.daemon.is_in_log('Bad reestablish'):
        return 1
    return 0


def checkBadHSMRequest(node):
    if node.daemon.is_in_log('bad hsm request'):
        return 1
    return 0


def checkMemleak(node):
    if node.daemon.is_in_log('MEMLEAK:'):
        return 1
    return 0

# Mapping from TEST_DB_PROVIDER env variable to class to be used
providers = {
    'sqlite3': SqliteDbProvider,
    'postgres': PostgresDbProvider,
}


@pytest.fixture(scope="session")
def db_provider(test_base_dir):
    provider = providers[os.getenv('TEST_DB_PROVIDER', 'sqlite3')](test_base_dir)
    provider.start()
    yield provider
    provider.stop()

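# Editor's note: the backend is chosen once per session from the
# environment, e.g. `TEST_DB_PROVIDER=postgres pytest` (sqlite3 is the
# default when the variable is unset).
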

@pytest.fixture
def executor(teardown_checks):
    ex = futures.ThreadPoolExecutor(max_workers=20)
    yield ex
    ex.shutdown(wait=False)

@pytest.fixture
def chainparams():
    chainparams = {
        'regtest': {
            "bip173_prefix": "bcrt",
            "elements": False,
            "name": "regtest",
            "p2sh_prefix": '2',
            "example_addr": "bcrt1qeyyk6sl5pr49ycpqyckvmttus5ttj25pd0zpvg",
            "feeoutput": False,
        },
        'liquid-regtest': {
            "bip173_prefix": "ert",
            "elements": True,
            "name": "liquid-regtest",
            "p2sh_prefix": 'X',
            "example_addr": "ert1qq8adjz4u6enf0cjey9j8yt0y490tact9fahkwf",
            "feeoutput": True,
        }
    }

    return chainparams[env('TEST_NETWORK', 'regtest')]
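
# Editor's illustrative sketch (hypothetical test, not in this file): tests
# can stay network-agnostic by reading expectations from the fixture rather
# than hard-coding regtest values:
#
#   def test_example_addr(chainparams):
#       addr = chainparams['example_addr']
#       assert addr.startswith(chainparams['bip173_prefix'])
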
File diff suppressed because it is too large