We'll rewrite the tests to use this infrastructure in the next commit. Changelog-Added: The new `pyln-testing` package now contains the testing infrastructure so it can be reused to test against c-lightning in external projects.
Christian Decker
5 years ago
8 changed files with 1773 additions and 0 deletions
@@ -0,0 +1,33 @@
# pyln-testing: A library to write tests against c-lightning

This library implements a number of utilities that help build tests for
c-lightning nodes. In particular it provides a number of pytest fixtures that
allow managing a test network of a given topology and executing a test
scenario against it.

`pyln-testing` is used by c-lightning for its internal tests, and by the
community plugin directory to exercise the plugins.

## Installation

`pyln-testing` is available on `pip`:

```bash
pip install pyln-testing
```

Alternatively you can also install the development version, to get access to
currently unreleased features, by checking out the c-lightning source code and
installing it into your python3 environment:

```bash
git clone https://github.com/ElementsProject/lightning.git
cd lightning/contrib/pyln-testing
python3 setup.py develop
```

This will link the library into your environment, so changes to the checked-out
source code are picked up automatically. Notice however that unreleased
versions may change the API without warning, so test thoroughly against the
released version.
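As a taste of the fixtures in action, here is a minimal sketch of a test. The conftest import line and the RPC assertions are illustrative assumptions, not part of this commit:

```python
# conftest.py -- assumption: expose the package's fixtures to pytest like this.
# from pyln.testing.fixtures import *  # noqa: F401,F403


def test_connect(node_factory):
    # Spin up two c-lightning nodes on the fixture-managed regtest network
    # and connect them to each other.
    l1 = node_factory.get_node()
    l2 = node_factory.get_node()
    l1.rpc.connect(l2.info['id'], 'localhost', l2.port)
    assert len(l1.rpc.listpeers()['peers']) == 1
```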
@@ -0,0 +1 @@
__version__ = "0.0.1"
@@ -0,0 +1,111 @@
""" A bitcoind proxy that allows instrumentation and canned responses
"""
from flask import Flask, request
from bitcoin.rpc import JSONRPCError
from bitcoin.rpc import RawProxy as BitcoinProxy
from cheroot.wsgi import Server
from cheroot.wsgi import PathInfoDispatcher

import decimal
import flask
import json
import logging
import threading


class DecimalEncoder(json.JSONEncoder):
    """By default json.dumps does not handle Decimals correctly, so we override its handling.
    """
    def default(self, o):
        if isinstance(o, decimal.Decimal):
            return "{:.8f}".format(float(o))
        return super(DecimalEncoder, self).default(o)


class BitcoinRpcProxy(object):
    def __init__(self, bitcoind, rpcport=0):
        self.app = Flask("BitcoindProxy")
        self.app.add_url_rule("/", "API entrypoint", self.proxy, methods=['POST'])
        self.rpcport = rpcport
        self.mocks = {}
        self.mock_counts = {}
        self.bitcoind = bitcoind
        self.request_count = 0

    def _handle_request(self, r):
        brpc = BitcoinProxy(btc_conf_file=self.bitcoind.conf_file)
        method = r['method']

        # If we have set a mock for this method reply with that instead of
        # forwarding the request.
        if method in self.mocks and type(self.mocks[method]) == dict:
            ret = {}
            ret['id'] = r['id']
            ret['error'] = None
            ret['result'] = self.mocks[method]
            self.mock_counts[method] += 1
            return ret
        elif method in self.mocks and callable(self.mocks[method]):
            self.mock_counts[method] += 1
            return self.mocks[method](r)

        try:
            reply = {
                "result": brpc._call(r['method'], *r['params']),
                "error": None,
                "id": r['id']
            }
        except JSONRPCError as e:
            reply = {
                "error": e.error,
                "code": -32603,
                "id": r['id']
            }
        self.request_count += 1
        return reply

    def proxy(self):
        r = json.loads(request.data.decode('ASCII'))

        # JSON-RPC batches arrive as a list of requests; handle each in turn.
        if isinstance(r, list):
            reply = [self._handle_request(subreq) for subreq in r]
        else:
            reply = self._handle_request(r)

        response = flask.Response(json.dumps(reply, cls=DecimalEncoder))
        response.headers['Content-Type'] = 'application/json'
        return response

    def start(self):
        d = PathInfoDispatcher({'/': self.app})
        self.server = Server(('0.0.0.0', self.rpcport), d)
        self.proxy_thread = threading.Thread(target=self.server.start)
        self.proxy_thread.daemon = True
        self.proxy_thread.start()

        # Now that bitcoind is running on the real rpcport, let's tell all
        # future callers to talk to the proxyport. We use the bind_addr as a
        # signal that the port is bound and accepting connections.
        while self.server.bind_addr[1] == 0:
            pass
        self.rpcport = self.server.bind_addr[1]
        logging.debug("BitcoinRpcProxy proxying incoming port {} to {}".format(self.rpcport, self.bitcoind.rpcport))

    def stop(self):
        self.server.stop()
        self.proxy_thread.join()
        logging.debug("BitcoinRpcProxy shut down after processing {} requests".format(self.request_count))

    def mock_rpc(self, method, response=None):
        """Mock the response to a future RPC call of @method

        The response can either be a dict with the full JSON-RPC response, or a
        function that returns such a response. If the response is None the mock
        is removed and future calls will be passed through to bitcoind again.

        """
        if response is not None:
            self.mocks[method] = response
            self.mock_counts[method] = 0
        elif method in self.mocks:
            del self.mocks[method]
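A short sketch of `mock_rpc` in use; the proxy instance, method names, and canned values below are illustrative, not mandated by this commit:

```python
# Assumes `proxy` is a started BitcoinRpcProxy in front of a running bitcoind.

# A dict is returned verbatim as the JSON-RPC 'result' field.
proxy.mock_rpc('estimatesmartfee', {'feerate': 0.00001, 'blocks': 6})

# A callable receives the raw request and must build the full reply itself.
def fail_sendrawtransaction(r):
    return {'id': r['id'], 'result': None,
            'error': {'code': -26, 'message': 'txn-mempool-conflict'}}

proxy.mock_rpc('sendrawtransaction', fail_sendrawtransaction)

# Passing None removes the mock; calls are forwarded to bitcoind again.
proxy.mock_rpc('sendrawtransaction', None)
```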
@@ -0,0 +1,197 @@
from ephemeral_port_reserve import reserve
from glob import glob

import logging
import os
import psycopg2
import random
import re
import shutil
import signal
import sqlite3
import string
import subprocess
import time


class Sqlite3Db(object):
    def __init__(self, path):
        self.path = path

    def get_dsn(self):
        """SQLite3 doesn't provide a DSN, resulting in no CLI-option.
        """
        return None

    def query(self, query):
        # Work on a copy of the database file so we don't interfere with the
        # node's own open connection to it.
        orig = os.path.join(self.path)
        copy = self.path + ".copy"
        shutil.copyfile(orig, copy)
        db = sqlite3.connect(copy)

        db.row_factory = sqlite3.Row
        c = db.cursor()
        c.execute(query)
        rows = c.fetchall()

        result = []
        for row in rows:
            result.append(dict(zip(row.keys(), row)))

        db.commit()
        c.close()
        db.close()
        return result

    def execute(self, query):
        db = sqlite3.connect(self.path)
        c = db.cursor()
        c.execute(query)
        db.commit()
        c.close()
        db.close()


class PostgresDb(object):
    def __init__(self, dbname, port):
        self.dbname = dbname
        self.port = port

        self.conn = psycopg2.connect("dbname={dbname} user=postgres host=localhost port={port}".format(
            dbname=dbname, port=port
        ))
        # Smoke-test the connection before handing it out.
        cur = self.conn.cursor()
        cur.execute('SELECT 1')
        cur.close()

    def get_dsn(self):
        return "postgres://postgres:password@localhost:{port}/{dbname}".format(
            port=self.port, dbname=self.dbname
        )

    def query(self, query):
        cur = self.conn.cursor()
        cur.execute(query)

        # Collect the results into a list of dicts.
        res = []
        for r in cur:
            t = {}
            # Zip the column definition with the value to get its name.
            for c, v in zip(cur.description, r):
                t[c.name] = v
            res.append(t)
        cur.close()
        return res

    def execute(self, query):
        with self.conn, self.conn.cursor() as cur:
            cur.execute(query)


class SqliteDbProvider(object):
    def __init__(self, directory):
        self.directory = directory

    def start(self):
        pass

    def get_db(self, node_directory, testname, node_id):
        path = os.path.join(
            node_directory,
            'lightningd.sqlite3'
        )
        return Sqlite3Db(path)

    def stop(self):
        pass


class PostgresDbProvider(object):
    def __init__(self, directory):
        self.directory = directory
        self.port = None
        self.proc = None
        print("Starting PostgresDbProvider")

    def locate_path(self):
        prefix = '/usr/lib/postgresql/*'
        matches = glob(prefix)

        # Collect installed versions, keyed by version number.
        candidates = {}
        for m in matches:
            g = re.search(r'([0-9]+[\.0-9]*)', m)
            if not g:
                continue
            candidates[float(g.group(1))] = m

        if len(candidates) == 0:
            raise ValueError("Could not find `postgres` and `initdb` binaries in {}. Is postgresql installed?".format(prefix))

        # Now iterate in reverse order through the matches, preferring the
        # newest installed version.
        for k, v in sorted(candidates.items())[::-1]:
            initdb = os.path.join(v, 'bin', 'initdb')
            postgres = os.path.join(v, 'bin', 'postgres')
            if os.path.isfile(initdb) and os.path.isfile(postgres):
                logging.info("Found `postgres` and `initdb` in {}".format(os.path.join(v, 'bin')))
                return initdb, postgres

        raise ValueError("Could not find `postgres` and `initdb` in any of the possible paths: {}".format(candidates.values()))

    def start(self):
        passfile = os.path.join(self.directory, "pgpass.txt")
        self.pgdir = os.path.join(self.directory, 'pgsql')
        # Need to write a tiny file containing the password so `initdb` can pick it up
        with open(passfile, 'w') as f:
            f.write('cltest\n')

        initdb, postgres = self.locate_path()
        subprocess.check_call([
            initdb,
            '--pwfile={}'.format(passfile),
            '--pgdata={}'.format(self.pgdir),
            '--auth=trust',
            '--username=postgres',
        ])
        self.port = reserve()
        self.proc = subprocess.Popen([
            postgres,
            '-k', '/tmp/',  # So we don't use /var/lib/...
            '-D', self.pgdir,
            '-p', str(self.port),
            '-F',
            '-i',
        ])
        # Hacky but seems to work ok (might want to make the postgres proc a
        # TailableProc as well if too flaky).
        time.sleep(1)
        self.conn = psycopg2.connect("dbname=template1 user=postgres host=localhost port={}".format(self.port))

        # Required for CREATE DATABASE to work
        self.conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

    def get_db(self, node_directory, testname, node_id):
        # Random suffix to avoid collisions on repeated tests
        nonce = ''.join(random.choice(string.ascii_lowercase + string.digits) for _ in range(8))
        dbname = "{}_{}_{}".format(testname, node_id, nonce)

        cur = self.conn.cursor()
        cur.execute("CREATE DATABASE {};".format(dbname))
        cur.close()
        db = PostgresDb(dbname, self.port)
        return db

    def stop(self):
        # Send the fast-shutdown signal, see [1] for details:
        #
        # SIGINT
        #
        # This is the Fast Shutdown mode. The server disallows new connections
        # and sends all existing server processes SIGTERM, which will cause
        # them to abort their current transactions and exit promptly. It then
        # waits for all server processes to exit and finally shuts down. If
        # the server is in online backup mode, backup mode will be terminated,
        # rendering the backup useless.
        #
        # [1] https://www.postgresql.org/docs/9.1/server-shutdown.html
        self.proc.send_signal(signal.SIGINT)
        self.proc.wait()
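A sketch of how a provider is driven. In real runs the `db_provider` fixture constructs it once per session and the NodeFactory calls `get_db` per node; the paths, test name, and query below are illustrative only:

```python
import os

from pyln.testing.db import SqliteDbProvider

node_dir = '/tmp/ltests-example'
os.makedirs(node_dir, exist_ok=True)

provider = SqliteDbProvider(node_dir)
provider.start()  # a no-op for sqlite3; PostgresDbProvider spawns a daemon here

# The NodeFactory normally calls get_db with the node's directory and id.
db = provider.get_db(node_dir, 'test_example', 1)
db.execute("CREATE TABLE IF NOT EXISTS vars (name TEXT, intval INTEGER)")
assert db.query("SELECT name, intval FROM vars") == []

provider.stop()
```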
@@ -0,0 +1,321 @@
from concurrent import futures
from pyln.testing.db import SqliteDbProvider, PostgresDbProvider
from pyln.testing.utils import NodeFactory, BitcoinD, ElementsD, env, DEVELOPER

import logging
import os
import pytest
import re
import shutil
import tempfile


# A dict in which we count how often a particular test has run so far. Used to
# give each attempt its own numbered directory, and avoid clashes.
__attempts = {}


@pytest.fixture(scope="session")
def test_base_dir():
    d = os.getenv("TEST_DIR", "/tmp")

    directory = tempfile.mkdtemp(prefix='ltests-', dir=d)
    print("Running tests in {}".format(directory))

    yield directory

    if os.listdir(directory) == []:
        shutil.rmtree(directory)


@pytest.fixture
def directory(request, test_base_dir, test_name):
    """Return a per-test specific directory.

    This makes a unique test-directory even if a test is rerun multiple times.

    """
    global __attempts
    # Auto-set the value if it isn't in the dict yet
    __attempts[test_name] = __attempts.get(test_name, 0) + 1
    directory = os.path.join(test_base_dir, "{}_{}".format(test_name, __attempts[test_name]))
    request.node.has_errors = False

    yield directory

    # This uses the status set in conftest.pytest_runtest_makereport to
    # determine whether we succeeded or failed. The report can be missing if
    # the failure occurs during the setup phase, hence the use of getattr
    # instead of accessing it directly.
    report = getattr(request.node, 'rep_call', None)
    outcome = report.outcome if report else None
    failed = not outcome or request.node.has_errors or outcome != 'passed'

    if not failed:
        shutil.rmtree(directory)
    else:
        logging.debug("Test execution failed, leaving the test directory {} intact.".format(directory))


@pytest.fixture
def test_name(request):
    yield request.function.__name__


network_daemons = {
    'regtest': BitcoinD,
    'liquid-regtest': ElementsD,
}


@pytest.fixture
def bitcoind(directory, teardown_checks):
    chaind = network_daemons[env('TEST_NETWORK', 'regtest')]
    bitcoind = chaind(bitcoin_dir=directory)

    try:
        bitcoind.start()
    except Exception:
        bitcoind.stop()
        raise

    info = bitcoind.rpc.getnetworkinfo()

    if info['version'] < 160000:
        bitcoind.rpc.stop()
        raise ValueError("bitcoind is too old. At least version 160000 (v0.16.0)"
                         " is needed, current version is {}".format(info['version']))

    info = bitcoind.rpc.getblockchaininfo()
    # Make sure we have some spendable funds
    if info['blocks'] < 101:
        bitcoind.generate_block(101 - info['blocks'])
    elif bitcoind.rpc.getwalletinfo()['balance'] < 1:
        logging.debug("Insufficient balance, generating 1 block")
        bitcoind.generate_block(1)

    yield bitcoind

    try:
        bitcoind.stop()
    except Exception:
        bitcoind.proc.kill()
    bitcoind.proc.wait()
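The `bitcoind` fixture can also be consumed on its own when a test only needs a funded bitcoind. A sketch (the RPC methods are standard Bitcoin Core calls; `generate_block` is the helper used in the fixture above):

```python
def test_bitcoind_only(bitcoind):
    # The fixture guarantees at least 101 blocks, i.e. a spendable coinbase.
    height = bitcoind.rpc.getblockchaininfo()['blocks']
    assert height >= 101

    bitcoind.generate_block(5)
    assert bitcoind.rpc.getblockchaininfo()['blocks'] == height + 5
```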

class TeardownErrors(object):
    def __init__(self):
        self.errors = []
        self.node_errors = []

    def add_error(self, msg):
        self.errors.append(msg)

    def add_node_error(self, node, msg):
        self.node_errors.append((node.daemon.prefix, msg))

    def __str__(self):
        node_errors = [" - {}: {}".format(*e) for e in self.node_errors]
        errors = [" - {}".format(e) for e in self.errors]

        errors = ["\nNode errors:"] + node_errors + ["Global errors:"] + errors
        return "\n".join(errors)

    def has_errors(self):
        return len(self.errors) > 0 or len(self.node_errors) > 0


@pytest.fixture
def teardown_checks(request):
    """A simple fixture to collect errors during teardown.

    We need to collect the errors and raise them as the very last step in the
    fixture tree, otherwise some fixtures may not be cleaned up
    correctly. Require this fixture in all other fixtures that need to either
    clean up before reporting an error or want to add an error that is to be
    reported.

    """
    errors = TeardownErrors()
    yield errors

    if errors.has_errors():
        # Format a nice list of everything that went wrong and raise an exception
        request.node.has_errors = True
        raise ValueError(str(errors))
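As the docstring asks, downstream fixtures should depend on `teardown_checks` and report into it rather than raising mid-teardown. A hypothetical sketch of such a fixture (the helper process is a stand-in, not an API from this commit):

```python
import subprocess

import pytest


@pytest.fixture
def helper_daemon(teardown_checks):
    # Hypothetical helper process managed by a downstream test suite.
    proc = subprocess.Popen(['sleep', '3600'])
    yield proc

    # Clean up first, then record any problem; teardown_checks raises at the
    # very end of the fixture tree, so teardown here still completes.
    proc.terminate()
    if proc.wait() not in (0, -15):
        teardown_checks.add_error("helper exited with code {}".format(proc.returncode))
```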

@pytest.fixture
def node_factory(request, directory, test_name, bitcoind, executor, db_provider, teardown_checks):
    nf = NodeFactory(
        test_name,
        bitcoind,
        executor,
        directory=directory,
        db_provider=db_provider,
    )

    yield nf
    ok, errs = nf.killall([not n.may_fail for n in nf.nodes])

    for e in errs:
        teardown_checks.add_error(e)

    def map_node_error(nodes, f, msg):
        for n in nodes:
            if n and f(n):
                teardown_checks.add_node_error(n, msg)

    map_node_error(nf.nodes, printValgrindErrors, "reported valgrind errors")
    map_node_error(nf.nodes, printCrashLog, "had crash.log files")
    map_node_error(nf.nodes, lambda n: not n.allow_broken_log and n.daemon.is_in_log(r'\*\*BROKEN\*\*'), "had BROKEN messages")
    map_node_error(nf.nodes, checkReconnect, "had unexpected reconnections")
    map_node_error(nf.nodes, checkBadGossip, "had bad gossip messages")
    map_node_error(nf.nodes, lambda n: n.daemon.is_in_log('Bad reestablish'), "had bad reestablish")
    map_node_error(nf.nodes, lambda n: n.daemon.is_in_log('bad hsm request'), "had bad hsm requests")
    map_node_error(nf.nodes, lambda n: n.daemon.is_in_log(r'Accessing a null column'), "accessed a null column")
    map_node_error(nf.nodes, checkMemleak, "had memleak messages")

    if not ok:
        teardown_checks.add_error("At least one lightning node exited with an unexpected non-zero return code")


def getValgrindErrors(node):
    for error_file in os.listdir(node.daemon.lightning_dir):
        if not re.fullmatch(r"valgrind-errors.\d+", error_file):
            continue
        with open(os.path.join(node.daemon.lightning_dir, error_file), 'r') as f:
            errors = f.read().strip()
            if errors:
                return errors, error_file
    return None, None


def printValgrindErrors(node):
    errors, fname = getValgrindErrors(node)
    if errors:
        print("-" * 31, "Valgrind errors", "-" * 32)
        print("Valgrind error file:", fname)
        print(errors)
        print("-" * 80)
    return 1 if errors else 0


def getCrashLog(node):
    if node.may_fail:
        return None, None
    try:
        crashlog = os.path.join(node.daemon.lightning_dir, 'crash.log')
        with open(crashlog, 'r') as f:
            return f.readlines(), crashlog
    except Exception:
        return None, None


def printCrashLog(node):
    errors, fname = getCrashLog(node)
    if errors:
        print("-" * 10, "{} (last 50 lines)".format(fname), "-" * 10)
        print("".join(errors[-50:]))
        print("-" * 80)
    return 1 if errors else 0


def checkReconnect(node):
    # Without DEVELOPER, we can't suppress reconnection.
    if node.may_reconnect or not DEVELOPER:
        return 0
    if node.daemon.is_in_log('Peer has reconnected'):
        return 1
    return 0


def checkBadGossip(node):
    if node.allow_bad_gossip:
        return 0
    # We can get bad gossip order from inside error msgs.
    if node.daemon.is_in_log('Bad gossip order from (?!error)'):
        # This can happen if a node sees a node_announce after a channel
        # is deleted, however.
        if node.daemon.is_in_log('Deleting channel'):
            return 0
        return 1

    # Other 'Bad' messages shouldn't happen.
    if node.daemon.is_in_log(r'gossipd.*Bad (?!gossip order from error)'):
        return 1
    return 0


def checkBroken(node):
    if node.allow_broken_log:
        return 0
    if node.daemon.is_in_log(r'\*\*BROKEN\*\*'):
        return 1
    return 0


def checkBadReestablish(node):
    if node.daemon.is_in_log('Bad reestablish'):
        return 1
    return 0


def checkBadHSMRequest(node):
    if node.daemon.is_in_log('bad hsm request'):
        return 1
    return 0


def checkMemleak(node):
    if node.daemon.is_in_log('MEMLEAK:'):
        return 1
    return 0


# Mapping from the TEST_DB_PROVIDER env variable to the class to be used
providers = {
    'sqlite3': SqliteDbProvider,
    'postgres': PostgresDbProvider,
}


@pytest.fixture(scope="session")
def db_provider(test_base_dir):
    provider = providers[os.getenv('TEST_DB_PROVIDER', 'sqlite3')](test_base_dir)
    provider.start()
    yield provider
    provider.stop()


@pytest.fixture
def executor(teardown_checks):
    ex = futures.ThreadPoolExecutor(max_workers=20)
    yield ex
    ex.shutdown(wait=False)


@pytest.fixture
def chainparams():
    chainparams = {
        'regtest': {
            "bip173_prefix": "bcrt",
            "elements": False,
            "name": "regtest",
            "p2sh_prefix": '2',
            "example_addr": "bcrt1qeyyk6sl5pr49ycpqyckvmttus5ttj25pd0zpvg",
            "feeoutput": False,
        },
        'liquid-regtest': {
            "bip173_prefix": "ert",
            "elements": True,
            "name": "liquid-regtest",
            "p2sh_prefix": 'X',
            "example_addr": "ert1qq8adjz4u6enf0cjey9j8yt0y490tact9fahkwf",
            "feeoutput": True,
        }
    }

    return chainparams[env('TEST_NETWORK', 'regtest')]
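A brief sketch of a test consuming `chainparams` to stay network-agnostic; the RPC call and its result key are assumptions for illustration, not pinned by this commit:

```python
import pytest


def test_addr_prefix(node_factory, chainparams):
    if chainparams['elements']:
        pytest.skip("Behaviour under test is bitcoin-regtest specific")

    l1 = node_factory.get_node()
    # Assumption: newaddr returns the fresh address under the 'address' key.
    addr = l1.rpc.newaddr()['address']
    assert addr.startswith(chainparams['bip173_prefix'])
```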
File diff suppressed because it is too large
@@ -0,0 +1,5 @@
pytest==5.0.1
Flask==1.1.1
cheroot==6.5.5
ephemeral-port-reserve==1.1.1
python-bitcoinlib==0.10.1
@@ -0,0 +1,22 @@
from setuptools import setup
from pyln.testing import __version__


with open('README.md', encoding='utf-8') as f:
    long_description = f.read()

with open('requirements.txt', 'r') as f:
    requirements = [l.strip() for l in f]

setup(name='pyln-testing',
      version=__version__,
      description='Library to facilitate writing tests for lightningd',
      long_description=long_description,
      long_description_content_type='text/markdown',
      url='http://github.com/ElementsProject/lightning',
      author='Christian Decker',
      author_email='decker.christian@gmail.com',
      install_requires=requirements,
      license='MIT',
      packages=['pyln.testing'],
      zip_safe=True)