SomberNight
6 years ago
15 changed files with 266 additions and 194 deletions
@@ -1,7 +1,29 @@
 #!/usr/bin/env python3
-from . import util
 import json
-from electrum.network import filter_protocol
-peers = filter_protocol(util.get_peers())
-results = util.send_request(peers, 'blockchain.estimatefee', [2])
-print(json.dumps(results, indent=4))
+import asyncio
+from statistics import median
+from numbers import Number
+
+from electrum.network import filter_protocol, Network
+from electrum.util import create_and_start_event_loop, log_exceptions
+
+import util
+
+
+loop, stopping_fut, loop_thread = create_and_start_event_loop()
+network = Network()
+network.start()
+
+@log_exceptions
+async def f():
+    try:
+        peers = await util.get_peers(network)
+        peers = filter_protocol(peers)
+        results = await util.send_request(network, peers, 'blockchain.estimatefee', [2])
+        print(json.dumps(results, indent=4))
+        feerate_estimates = filter(lambda x: isinstance(x, Number), results.values())
+        print(f"median feerate: {median(feerate_estimates)}")
+    finally:
+        stopping_fut.set_result(1)
+
+asyncio.run_coroutine_threadsafe(f(), loop)
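Note: the rewritten scripts all rely on create_and_start_event_loop from electrum.util to run an asyncio event loop in a background thread, then submit their coroutine with asyncio.run_coroutine_threadsafe. A minimal standalone sketch of that general pattern, using only the standard library (this is not Electrum's actual helper, just the shape of what it provides):

import asyncio
import threading

loop = asyncio.new_event_loop()
stopping_fut = loop.create_future()

def run_loop():
    asyncio.set_event_loop(loop)
    # keep the loop running until something resolves stopping_fut
    loop.run_until_complete(stopping_fut)

loop_thread = threading.Thread(target=run_loop, daemon=True)
loop_thread.start()

async def work():
    await asyncio.sleep(0.1)
    return 42

# submit from the main thread; this returns a concurrent.futures.Future
fut = asyncio.run_coroutine_threadsafe(work(), loop)
print(fut.result(timeout=5))

# signal the loop to stop, then wait for the thread to finish
loop.call_soon_threadsafe(stopping_fut.set_result, 1)
loop_thread.join()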
@@ -1,14 +1,28 @@
 #!/usr/bin/env python3
-from . import util
-from electrum.network import filter_protocol
-from electrum.blockchain import hash_header
+import asyncio
+
+from electrum.network import filter_protocol, Network
+from electrum.util import create_and_start_event_loop, log_exceptions
+from electrum.blockchain import hash_raw_header
+
+import util
 
-peers = util.get_peers()
-peers = filter_protocol(peers, 's')
 
-results = util.send_request(peers, 'blockchain.headers.subscribe', [])
+loop, stopping_fut, loop_thread = create_and_start_event_loop()
+network = Network()
+network.start()
 
-for n,v in sorted(results.items(), key=lambda x:x[1].get('block_height')):
-    print("%60s"%n, v.get('block_height'), hash_header(v))
+@log_exceptions
+async def f():
+    try:
+        peers = await util.get_peers(network)
+        peers = filter_protocol(peers, 's')
+        results = await util.send_request(network, peers, 'blockchain.headers.subscribe', [])
+        for server, header in sorted(results.items(), key=lambda x: x[1].get('height')):
+            height = header.get('height')
+            blockhash = hash_raw_header(header.get('hex'))
+            print("%60s" % server, height, blockhash)
+    finally:
+        stopping_fut.set_result(1)
+
+asyncio.run_coroutine_threadsafe(f(), loop)
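The new script hashes the raw header hex returned by blockchain.headers.subscribe with hash_raw_header. Assuming this is the standard Bitcoin block hash (double SHA-256 of the 80-byte serialized header, displayed byte-reversed), an equivalent standalone computation looks roughly like this:

import hashlib

def block_hash_from_raw_header(header_hex: str) -> str:
    raw = bytes.fromhex(header_hex)  # 80-byte serialized block header
    digest = hashlib.sha256(hashlib.sha256(raw).digest()).digest()
    return digest[::-1].hex()        # big-endian hex, as block explorers display it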
@@ -1,10 +1,27 @@
 #!/usr/bin/env python3
-
-from .. import set_verbosity
-from electrum.network import filter_version
-from . import util
 import json
-set_verbosity(False)
-
-servers = filter_version(util.get_peers())
-print(json.dumps(servers, sort_keys = True, indent = 4))
+import asyncio
+
+from electrum.network import filter_version, Network
+from electrum.util import create_and_start_event_loop, log_exceptions
+from electrum import constants
+
+import util
+
+
+#constants.set_testnet()
+
+loop, stopping_fut, loop_thread = create_and_start_event_loop()
+network = Network()
+network.start()
+
+@log_exceptions
+async def f():
+    try:
+        peers = await util.get_peers(network)
+        peers = filter_version(peers)
+        print(json.dumps(peers, sort_keys=True, indent=4))
+    finally:
+        stopping_fut.set_result(1)
+
+asyncio.run_coroutine_threadsafe(f(), loop)
@@ -1,20 +1,38 @@
 #!/usr/bin/env python3
-from . import util
 import sys
+import asyncio
+
+from electrum.network import filter_protocol, Network
+from electrum.util import create_and_start_event_loop, log_exceptions
+
+import util
+
 
 try:
-    tx = sys.argv[1]
+    txid = sys.argv[1]
 except:
     print("usage: txradar txid")
     sys.exit(1)
 
-peers = util.get_peers()
-results = util.send_request(peers, 'blockchain.transaction.get', [tx])
-
-r1 = []
-r2 = []
-
-for k, v in results.items():
-    (r1 if v else r2).append(k)
-
-print("Received %d answers"%len(results))
-print("Propagation rate: %.1f percent" % (len(r1) *100./(len(r1)+ len(r2))))
+loop, stopping_fut, loop_thread = create_and_start_event_loop()
+network = Network()
+network.start()
+
+@log_exceptions
+async def f():
+    try:
+        peers = await util.get_peers(network)
+        peers = filter_protocol(peers, 's')
+        results = await util.send_request(network, peers, 'blockchain.transaction.get', [txid])
+        r1, r2 = [], []
+        for k, v in results.items():
+            (r1 if not isinstance(v, Exception) else r2).append(k)
+        print(f"Received {len(results)} answers")
+        try: propagation = len(r1) * 100. / (len(r1) + len(r2))
+        except ZeroDivisionError: propagation = 0
+        print(f"Propagation rate: {propagation:.1f} percent")
+    finally:
+        stopping_fut.set_result(1)
+
+asyncio.run_coroutine_threadsafe(f(), loop)
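The propagation rate printed by the new txradar is simply the share of reachable servers that returned the raw transaction rather than an error. A tiny standalone check of that arithmetic with made-up numbers (40 servers have the transaction, 10 return an error):

r1 = ["server%d" % i for i in range(40)]       # hypothetical: servers that returned the tx
r2 = ["server%d" % i for i in range(40, 50)]   # hypothetical: servers that errored
try:
    propagation = len(r1) * 100. / (len(r1) + len(r2))
except ZeroDivisionError:
    propagation = 0
print(f"Propagation rate: {propagation:.1f} percent")  # -> 80.0 percent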
@@ -1,87 +1,46 @@
-import select, time, queue
-# import electrum
-from .. import Connection, Interface, SimpleConfig
-from electrum.network import parse_servers
-from collections import defaultdict
+import asyncio
+from typing import List, Sequence
+
+from aiorpcx import TaskGroup
+
+from electrum.network import parse_servers, Network
+from electrum.interface import Interface
 
-# electrum.util.set_verbosity(1)
-def get_interfaces(servers, timeout=10):
-    '''Returns a map of servers to connected interfaces. If any
-    connections fail or timeout, they will be missing from the map.
-    '''
-    assert type(servers) is list
-    socket_queue = queue.Queue()
-    config = SimpleConfig()
-    connecting = {}
-    for server in servers:
-        if server not in connecting:
-            connecting[server] = Connection(server, socket_queue, config.path)
-    interfaces = {}
-    timeout = time.time() + timeout
-    count = 0
-    while time.time() < timeout and count < len(servers):
-        try:
-            server, socket = socket_queue.get(True, 0.3)
-        except queue.Empty:
-            continue
-        if socket:
-            interfaces[server] = Interface(server, socket)
-        count += 1
-    return interfaces
+#electrum.util.set_verbosity(True)
 
-def wait_on_interfaces(interfaces, timeout=10):
-    '''Return a map of servers to a list of (request, response) tuples.
-    Waits timeout seconds, or until each interface has a response'''
-    result = defaultdict(list)
-    timeout = time.time() + timeout
-    while len(result) < len(interfaces) and time.time() < timeout:
-        rin = [i for i in interfaces.values()]
-        win = [i for i in interfaces.values() if i.unsent_requests]
-        rout, wout, xout = select.select(rin, win, [], 1)
-        for interface in wout:
-            interface.send_requests()
-        for interface in rout:
-            responses = interface.get_responses()
-            if responses:
-                result[interface.server].extend(responses)
-    return result
 
-def get_peers():
-    config = SimpleConfig()
-    peers = {}
-    # 1. get connected interfaces
-    server = config.get('server')
-    if server is None:
-        print("You need to set a secure server, for example (for mainnet): 'electrum setconfig server helicarrier.bauerj.eu:50002:s'")
-        return []
-    interfaces = get_interfaces([server])
-    if not interfaces:
-        print("No connection to", server)
-        return []
-    # 2. get list of peers
-    interface = interfaces[server]
-    interface.queue_request('server.peers.subscribe', [], 0)
-    responses = wait_on_interfaces(interfaces).get(server)
-    if responses:
-        response = responses[0][1]  # One response, (req, response) tuple
-        peers = parse_servers(response.get('result'))
+async def get_peers(network: Network):
+    while not network.is_connected():
+        await asyncio.sleep(1)
+    interface = network.interface
+    session = interface.session
+    print(f"asking server {interface.server} for its peers")
+    peers = parse_servers(await session.send_request('server.peers.subscribe'))
+    print(f"got {len(peers)} servers")
     return peers
 
 
-def send_request(peers, method, params):
-    print("Contacting %d servers"%len(peers))
-    interfaces = get_interfaces(peers)
-    print("%d servers could be reached" % len(interfaces))
-    for peer in peers:
-        if not peer in interfaces:
-            print("Connection failed:", peer)
-    for msg_id, i in enumerate(interfaces.values()):
-        i.queue_request(method, params, msg_id)
-    responses = wait_on_interfaces(interfaces)
-    for peer in interfaces:
-        if not peer in responses:
-            print(peer, "did not answer")
-    results = dict(zip(responses.keys(), [t[0][1].get('result') for t in responses.values()]))
-    print("%d answers"%len(results))
-    return results
+async def send_request(network: Network, servers: List[str], method: str, params: Sequence):
+    print(f"contacting {len(servers)} servers")
+    num_connecting = len(network.connecting)
+    for server in servers:
+        network._start_interface(server)
+    # sleep a bit
+    for _ in range(10):
+        if len(network.connecting) < num_connecting:
+            break
+        await asyncio.sleep(1)
+    print(f"connected to {len(network.interfaces)} servers. sending request to all.")
+    responses = dict()
+    async def get_response(iface: Interface):
+        try:
+            res = await iface.session.send_request(method, params, timeout=10)
+        except Exception as e:
+            print(f"server {iface.server} errored or timed out: ({repr(e)})")
+            res = e
+        responses[iface.server] = res
+    async with TaskGroup() as group:
+        for interface in network.interfaces.values():
+            await group.spawn(get_response(interface))
+    print("%d answers" % len(responses))
+    return responses
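The new send_request fans the same JSON-RPC request out to every connected interface by spawning one task per server inside an aiorpcx TaskGroup and collecting the results into a dict keyed by server. A stripped-down standalone sketch of that fan-out pattern (server names are hypothetical, and a sleep stands in for iface.session.send_request):

import asyncio
from aiorpcx import TaskGroup

async def main():
    responses = {}

    async def query(server: str):
        # placeholder for iface.session.send_request(method, params, timeout=10)
        await asyncio.sleep(0.01)
        responses[server] = f"result from {server}"

    async with TaskGroup() as group:
        for server in ["server_a", "server_b", "server_c"]:  # hypothetical servers
            await group.spawn(query(server))
    # leaving the TaskGroup context waits for all spawned tasks to finish
    print(responses)

asyncio.run(main())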