
Merge branch 'master' of github.com:kyuupichan/electrumx into develop

Branch: master
Neil Booth, 8 years ago
Parent commit: bf202f8395
57 changed files:

    2  .gitignore
    5  .travis.yml
  220  README.rst
   76  compact_history.py
    0  contrib/daemontools/env/COIN
    0  contrib/daemontools/env/DAEMON_URL
    0  contrib/daemontools/env/DB_DIRECTORY
    0  contrib/daemontools/env/ELECTRUMX
    0  contrib/daemontools/env/NET
    0  contrib/daemontools/env/USERNAME
    0  contrib/daemontools/log/run
    3  contrib/daemontools/run
   13  contrib/python3.6/python-3.6.sh
   26  contrib/raspberrypi3/install_electrumx.sh
   37  contrib/raspberrypi3/run_electrumx.sh
    0  contrib/systemd/electrumx.conf
    0  contrib/systemd/electrumx.service
    3  docs/AUTHORS
   83  docs/ENVIRONMENT.rst
   36  docs/HOWTO.rst
   56  docs/PEER_DISCOVERY.rst
  106  docs/PROTOCOL.rst
    2  electrumx_server.py
  399  lib/coins.py
   10  lib/hash.py
   47  lib/peer.py
  120  lib/socks.py
  136  lib/tx.py
   28  lib/util.py
    3  samples/daemontools/run
   74  server/block_processor.py
   31  server/controller.py
  114  server/daemon.py
  459  server/db.py
  124  server/env.py
   29  server/mempool.py
  310  server/peers.py
  104  server/session.py
    2  server/version.py
   17  tests/blocks/bitcoin_mainnet_100000.json
   19  tests/blocks/digibyte_mainnet_4394891.json
   19  tests/blocks/dogecoin_mainnet_371337.json
   18  tests/blocks/litecoin_mainnet_900000.json
   14  tests/blocks/namecoin_mainnet_19200.json
   15  tests/blocks/namecoin_mainnet_19204.json
   15  tests/blocks/reddcoin_mainnet_1200000.json
   20  tests/blocks/reddcoin_mainnet_8000.json
   15  tests/blocks/zcash_mainnet_1000.json
   10  tests/lib/test_addresses.py
   68  tests/lib/test_hash.py
   27  tests/lib/test_util.py
  131  tests/server/test_compaction.py
  282  tests/server/test_env.py
    0  tests/server/test_storage.py
   68  tests/test_blocks.py
  397  tests/wallet/test_bip32.py
  306  wallet/bip32.py

2
.gitignore

@ -1,4 +1,6 @@
*/__pycache__/
.cache/
tests/*/__pycache__/
*/*~
*.#*
*#

5
.travis.yml

@ -12,10 +12,11 @@ python:
# command to install dependencies
install:
- pip install aiohttp
- pip install ecdsa
- pip install plyvel
- pip install pyrocksdb
- pip install pytest-cov
- pip install python-coveralls
# command to run tests
script: pytest --cov=server --cov=lib
after_success: coveralls
script: pytest --cov=server --cov=lib --cov=wallet
after_success: coveralls

220
README.rst

@ -17,6 +17,12 @@ Getting Started
===============
See `docs/HOWTO.rst`_.
There is also an `installer`_ available that simplifies the installation on various Linux-based distributions.
There is also a `Dockerfile`_ available.
.. _installer: https://github.com/bauerj/electrumx-installer
.. _Dockerfile: https://github.com/followtheart/electrumx-docker
Features
========
@ -127,94 +133,139 @@ Roadmap
ChangeLog
=========
Version 1.0
-----------
* Minor doc tweaks only
Version 0.99.4
--------------
* Add support for Bitcoin Unlimited's nolnet; set **NET** to nolnet
* Choose 2 peers per bucket
* Minor bugfix
Version 0.99.3
--------------
* Require Python 3.5.3. 3.5.2 has asyncio API and socket-related issues.
Resolves `#135`_
* Remove peer semaphore
* Improved Base58 handling for >1 byte version prefix (erasmospunk)
Version 0.99.2
--------------
* don't announce self if a non-public IP address
* logging tweaks
Version 0.99.1
--------------
* Add more verbose logging in attempt to understand issue `#135`_
* REPORT_TCP_PORT_TOR and REPORT_SSL_PORT_TOR were ignored when constructing
IRC real names. Fixes `#136`_
* Only serve chunk requests in forward direction; disconnect clients iterating
backwards. Minimizes bandwidth consumption caused by misbehaving Electrum
clients. Closes `#132`_
* Tor coin peers would always be scheduled for check, fixes `#138`_ (fr3aker)
Version 0.99
------------
Preparation for release of 1.0, which will only have bug fixes and
documentation updates.
* improve handling of daemon going down so that incoming connections
are not blocked. Also improve logging thereof. Fixes `#100`_.
* add facility to disable peer discovery and/or self announcement,
see `docs/ENVIRONMENT.rst`_.
* add FairCoin (thokon00)
Version 0.11.4
Version 1.0.13
--------------
* peer handling fixes / improvements based on suggestions of hsmiths
- improve mempool handling and height notifications
- add bitcoin-segwit as a new COIN
Version 0.11.3
Version 1.0.12
--------------
* fixed a typo in lib/peer.py pointed out by hsmiths
- handle legacy daemons, add support for Blackcoin and Peercoin (erasmospunk)
- implement history compression; can currently only be done from a script
with the server down
- Add dockerfile reference (followtheart)
- doc, runfile fixes (Henry, emilrus)
- add bip32 implementation, currently unused
- daemon compatibility improvements (erasmospunk)
- permit underscores in hostnames, updated Bitcoin server list
Version 0.11.2
Version 1.0.11
--------------
* Preliminary implementation of script hash subscriptions to enable
subscribing to updates of arbitrary scripts, not just those of
standard bitcoin addresses. I'll fully document once confirmed
working as expected.
Closes `#124`_.
Version 0.11.1
- disable IRC for bitcoin mainnet
- remove dead code, allow custom Daemon & BlockProcessor classes (erasmospunk)
- add SERVER_(SUB)VERSION to banner metavariables (LaoDC)
- masternode methods for Dash (TheLazier)
- allow multiple P2SH address versions, implement for Litecoin (pooler)
- update Bitcoin's TX_COUNT and block height (JWU42)
- update BU nolnet parameters
- fix diagnostic typo (anduck)
- Issues fixed: `#180`_
Version 1.0.10
--------------
* report unconfirmed parent tx status correctly, and notify if that
parent status changes. Fixes `#129`_.
Version 0.11.0
--------------
- add bauerj's installer docs
- segwit has activated on Litecoin. Make segwit deserialization the
default. Also as the first Segwit block probably will break old
electrum-server implementation servers, disable IRC and make
Litecoin mainnet and testnet use the peer-discovery protocol. If
you previously used "testnet-segwit" as your NET you should instead
use "testnet".
Version 1.0.9
-------------
- ignore peers not appearing in their features list
- validate hostnames in Env object
- added tests for env.py
- Einsteinium support and contrib script shebang fix (erasmospunk)
- set last_good only if successfully verified
Version 1.0.8
-------------
Minor peer-discovery tweaks:
* I intended that if a host and its IP address were both registered as
peers, the real hostname would replace the IP address. That wasn't
working properly and is fixed now.
* 1.0.6 no longer required a clearnet identity but part of the peer
discovery logic assumed one existed. That is now fixed.
Version 1.0.7
-------------
Improvements to proxy handling and peer discovery
* background async proxy detection loop. Removes responsibility for
proxy detection and maintenance from the peer manager.
* peer discovery waits for an initial proxy detection attempt to complete
before starting
* new feature: flag to force peer discovery to happen via the proxy.
This might be useful for someone exclusively running a Tor service
that doesn't want to reveal its IP address. See **FORCE_PROXY** in
`docs/ENVIRONMENT.rst`_ for details and caveats.
* other minor fixes and tweaks
Version 1.0.6
-------------
* updated to handle incompatibilities between aiohttp 1.0 and 2.0.
ElectrumX should work with either for now; I will drop support for
1.0 in a few months. Fixes `#163`_.
* relax get_chunk restrictions for clients 1.8.3 and higher. Closes
`#162`_.
* **REPORT_HOST** no longer defaults to **HOST**. If not set, no
clearnet identity will be advertised.
* Add Viacoin support (romanornr)
Version 1.0.5
-------------
* the peer looping was actually just looping of logging output, not
connections. Hopefully fixed for good in this release. Closes `#160`_.
Version 1.0.4
-------------
* fix another unwanted loop in peer discovery, tweak diagnostics
Version 1.0.3
-------------
* fix a verification loop that happened occasionally with bad peers
Version 1.0.2
-------------
* stricter acceptance of add_peer requests: rate-limit onion peers,
and require incoming requests to resolve to the requesting IP address
* validate peer hostnames (closes `#157`_)
* verify height for all peers (closes `#152`_)
* various improvements to peer handling
* various documentation tweaks
* limit the maximum number of sessions based on the process's
open file soft limit (closes `#158`_); see the sketch after this list
* improved altcoin support for variable-length block headers and AuxPoW
(erasmospunk) (closes `#128`_ and `#83`_)
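A minimal sketch of the session-cap idea, using only the standard
library; the reserve figure is an assumption for illustration, not
ElectrumX's actual calculation::

    import resource

    # Query the process's open-file soft limit
    soft_limit, _hard_limit = resource.getrlimit(resource.RLIMIT_NOFILE)
    # Reserve descriptors for the DB, logs and the daemon connection;
    # the figure of 100 is hypothetical
    max_sessions = max(0, soft_limit - 100)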
Version 1.0.1
-------------
* Rate-limit add_peer calls in a random way
* Fix discovery of base height in reorgs
* Don't permit common but invalid REPORT_HOST values
* Set reorg limit to 8000 blocks on testnet
* dogecoin / litecoin parameter fixes (erasmospunk, pooler)
* minor doc tweaks
* implementation of `docs/PEER_DISCOVERY.rst`_ for discovery of server
peers without using IRC. Closes `#104`_. Since all testnet peers
are ElectrumX servers, IRC advertising is now disabled on bitcoin
testnet.
Version 1.0
-----------
Thanks to bauerj, hsmiths and JWU42 for their help testing these
changes over the last month.
* you can now specify a tor proxy (or have it autodetected if local),
and if an incoming connection seems to be from the proxy a
tor-specific banner file is served. See **TOR_BANNER_FILE** in
`docs/ENVIRONMENT.rst`_.
* Minor doc tweaks only
**Neil Booth** kyuupichan@gmail.com https://github.com/kyuupichan
@ -222,14 +273,15 @@ Version 0.11.0
1BWwXJH3q6PRsizBkSGm2Uw4Sz1urZ5sCj
.. _#100: https://github.com/kyuupichan/electrumx/issues/100
.. _#104: https://github.com/kyuupichan/electrumx/issues/104
.. _#124: https://github.com/kyuupichan/electrumx/issues/124
.. _#129: https://github.com/kyuupichan/electrumx/issues/129
.. _#132: https://github.com/kyuupichan/electrumx/issues/132
.. _#135: https://github.com/kyuupichan/electrumx/issues/135
.. _#136: https://github.com/kyuupichan/electrumx/issues/136
.. _#138: https://github.com/kyuupichan/electrumx/issues/138
.. _#83: https://github.com/kyuupichan/electrumx/issues/83
.. _#128: https://github.com/kyuupichan/electrumx/issues/128
.. _#152: https://github.com/kyuupichan/electrumx/issues/152
.. _#157: https://github.com/kyuupichan/electrumx/issues/157
.. _#158: https://github.com/kyuupichan/electrumx/issues/158
.. _#160: https://github.com/kyuupichan/electrumx/issues/160
.. _#162: https://github.com/kyuupichan/electrumx/issues/162
.. _#163: https://github.com/kyuupichan/electrumx/issues/163
.. _#180: https://github.com/kyuupichan/electrumx/issues/180
.. _docs/HOWTO.rst: https://github.com/kyuupichan/electrumx/blob/master/docs/HOWTO.rst
.. _docs/ENVIRONMENT.rst: https://github.com/kyuupichan/electrumx/blob/master/docs/ENVIRONMENT.rst
.. _docs/PEER_DISCOVERY.rst: https://github.com/kyuupichan/electrumx/blob/master/docs/PEER_DISCOVERY.rst

76
compact_history.py

@ -0,0 +1,76 @@
#!/usr/bin/env python3
#
# Copyright (c) 2017, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Script to compact the history database. This should save space and
will reset the flush counter to a low number, avoiding overflow when
the flush count reaches 65,536.
This needs to lock the database so ElectrumX must not be running -
shut it down cleanly first.
It is recommended you run this script with the same environment as
ElectrumX. However it is intended to be runnable with just
DB_DIRECTORY and COIN set (COIN defaults as for ElectrumX).
If you use daemon tools, you might run this script like so:
envdir /path/to/the/environment/directory ./compact_history.py
Depending on your hardware this script may take up to 6 hours to
complete; it logs progress regularly.
Compaction can be interrupted and restarted harmlessly and will pick
up where it left off. However, if you restart ElectrumX without
running the compaction to completion, it will not benefit and
subsequent compactions will restart from the beginning.
'''
import logging
import sys
import traceback
from os import environ

from server.env import Env
from server.db import DB


def compact_history():
    if sys.version_info < (3, 5, 3):
        raise RuntimeError('Python >= 3.5.3 is required to run ElectrumX')

    environ['DAEMON_URL'] = ''   # Avoid Env erroring out
    env = Env()
    db = DB(env)
    assert not db.first_sync

    # Continue where we left off, if interrupted
    if db.comp_cursor == -1:
        db.comp_cursor = 0

    db.comp_flush_count = max(db.comp_flush_count, 1)
    limit = 8 * 1000 * 1000

    while db.comp_cursor != -1:
        db._compact_history(limit)


def main():
    logging.basicConfig(level=logging.INFO)
    logging.info('Starting history compaction...')
    try:
        compact_history()
    except Exception:
        traceback.print_exc()
        logging.critical('History compaction terminated abnormally')
    else:
        logging.info('History compaction complete')


if __name__ == '__main__':
    main()

0
samples/daemontools/env/COIN → contrib/daemontools/env/COIN

0
samples/daemontools/env/DAEMON_URL → contrib/daemontools/env/DAEMON_URL

0
samples/daemontools/env/DB_DIRECTORY → contrib/daemontools/env/DB_DIRECTORY

0
samples/daemontools/env/ELECTRUMX → contrib/daemontools/env/ELECTRUMX

0
samples/daemontools/env/NETWORK → contrib/daemontools/env/NET

0
samples/daemontools/env/USERNAME → contrib/daemontools/env/USERNAME

0
samples/daemontools/log/run → contrib/daemontools/log/run

3
contrib/daemontools/run

@ -0,0 +1,3 @@
#!/bin/sh
echo "Launching ElectrumX server..."
exec 2>&1 envdir ./env /bin/sh -c 'setuidgid $USERNAME python3 $ELECTRUMX'

13
contrib/python3.6/python-3.6.sh

@ -0,0 +1,13 @@
#!/bin/sh
###########################
#Installation of Python 3.6
###########################
sudo add-apt-repository ppa:jonathonf/python-3.6
sudo apt-get update && sudo apt-get install python3.6 python3.6-dev
cd /home/username
git clone https://github.com/kyuupichan/electrumx.git
cd electrumx
sudo python3.6 setup.py install

26
contrib/raspberrypi3/install_electrumx.sh

@ -0,0 +1,26 @@
#!/bin/sh
###################
# install electrumx
###################
# upgrade raspbian to 'stretch' distribution for python 3.5 support
echo 'deb http://mirrordirector.raspbian.org/raspbian/ testing main contrib non-free rpi' | sudo tee /etc/apt/sources.list.d/stretch.list
sudo apt-get update
sudo apt-get dist-upgrade
sudo apt-get autoremove
# install electrumx dependencies
sudo apt-get install python3-pip
sudo apt-get install build-essential libc6-dev
sudo apt-get install libncurses5-dev libncursesw5-dev
sudo apt install libreadline6-dev/stable libreadline6/stable
sudo apt-get install libleveldb-dev
sudo apt-get install git
sudo pip3 install plyvel
sudo pip3 install irc
# install electrumx
git clone https://github.com/kyuupichan/electrumx.git
cd electrumx
sudo python3 setup.py install

37
contrib/raspberrypi3/run_electrumx.sh

@ -0,0 +1,37 @@
#!/bin/sh
###############
# run_electrumx
###############
# configure electrumx
export COIN=Bitcoin
export DAEMON_URL=http://rpcuser:rpcpassword@127.0.0.1
export NET=mainnet
export CACHE_MB=400
export DB_DIRECTORY=/home/username/.electrumx/db
export SSL_CERTFILE=/home/username/.electrumx/certfile.crt
export SSL_KEYFILE=/home/username/.electrumx/keyfile.key
export BANNER_FILE=/home/username/.electrumx/banner
export DONATION_ADDRESS=your-donation-address
# connectivity
export HOST=
export TCP_PORT=50001
export SSL_PORT=50002
# visibility
export IRC=
export IRC_NICK=hostname
export REPORT_HOST=hostname.com
export RPC_PORT=8000
# run electrumx
ulimit -n 10000
/usr/local/bin/electrumx_server.py 2>> /home/username/.electrumx/electrumx.log >> /home/username/.electrumx/electrumx.log &
######################
# auto-start electrumx
######################
# add this line to crontab -e
# @reboot /path/to/run_electrumx.sh

0
samples/systemd/electrumx.conf → contrib/systemd/electrumx.conf

0
samples/systemd/electrumx.service → contrib/systemd/electrumx.service

3
docs/AUTHORS

@ -1,2 +1,3 @@
Neil Booth: creator and maintainer
Johann Bauer: backend DB abstraction
Johann Bauer: backend DB abstraction
John Jegutanis: alt-chain integrations

83
docs/ENVIRONMENT.rst

@ -116,8 +116,10 @@ These environment variables are optional:
You can place several meta-variables in your banner file, which will be
replaced before serving to a client.
+ **$VERSION** is replaced with the ElectrumX version you are
running, such as *ElectrumX 0.9.22*.
+ **$SERVER_VERSION** is replaced with the ElectrumX version you are
running, such as *1.0.10*.
+ **$SERVER_SUBVERSION** is replaced with the ElectrumX user agent
string. For example, `ElectrumX 1.0.10`.
+ **$DAEMON_VERSION** is replaced with the daemon's version as a
dot-separated string. For example *0.12.1*.
+ **$DAEMON_SUBVERSION** is replaced with the daemon's user agent
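In code, the substitution amounts to plain string replacement. A
hypothetical sketch (the helper and its arguments are illustrative,
not ElectrumX's actual implementation)::

    def expand_banner(text, server_version, daemon_version, daemon_subversion):
        '''Replace banner meta-variables with their current values.'''
        substitutions = {
            '$SERVER_VERSION': server_version,            # e.g. '1.0.10'
            '$SERVER_SUBVERSION': 'ElectrumX ' + server_version,
            '$DAEMON_VERSION': daemon_version,            # e.g. '0.12.1'
            '$DAEMON_SUBVERSION': daemon_subversion,
        }
        for variable, value in substitutions.items():
            text = text.replace(variable, value)
        return text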
@ -215,6 +217,7 @@ raise them.
functioning Electrum clients by default will send pings roughly
every 60 seconds.
Peer Discovery
--------------
@ -250,6 +253,15 @@ some of this.
peer discovery if it notices it is not present in the peer's
returned list.
* **FORCE_PROXY**
By default peer discovery happens over the clear internet. Set this
to non-empty to force peer discovery to be done via the proxy. This
might be useful if you are running a Tor service exclusively and
wish to keep your IP address private. **NOTE**: in such a case you
should leave **IRC** unset as IRC connections are *always* over the
normal internet.
* **TOR_PROXY_HOST**
The host where your Tor proxy is running. Defaults to *localhost*.
@ -264,59 +276,70 @@ some of this.
9150 (Tor browser bundle) and 1080 (socks).
IRC
---
Server Advertising
------------------
Use the following environment variables if you want to advertise
connectivity on IRC:
* **IRC**
Set to anything non-empty to advertise on IRC
* **IRC_NICK**
The nick to use when connecting to IRC. The default is a hash of
**REPORT_HOST**. Either way a prefix will be prepended depending on
**COIN** and **NET**.
These environment variables affect how your server is advertised, both
by peer discovery (if enabled) and IRC (if enabled).
* **REPORT_HOST**
The host to advertise. Defaults to **HOST**.
The clearnet host to advertise. If not set, no clearnet host is
advertised.
* **REPORT_TCP_PORT**
The TCP port to advertise. Defaults to **TCP_PORT**. '0' disables
publishing the port.
The clearnet TCP port to advertise if **REPORT_HOST** is set.
Defaults to **TCP_PORT**. '0' disables publishing a TCP port.
* **REPORT_SSL_PORT**
The SSL port to advertise. Defaults to **SSL_PORT**. '0' disables
publishing the port.
The clearnet SSL port to advertise if **REPORT_HOST** is set.
Defaults to **SSL_PORT**. '0' disables publishing an SSL port.
* **REPORT_HOST_TOR**
The tor address to advertise; must end with `.onion`. If set, an
additional connection to IRC happens with '_tor' appended to
**IRC_NICK**.
If you wish to run a Tor service, this is the Tor host name to
advertise and must end with `.onion`.
* **REPORT_TCP_PORT_TOR**
The TCP port to advertise for Tor. Defaults to **REPORT_TCP_PORT**,
unless it is '0', otherwise **TCP_PORT**. '0' disables publishing
the port.
The Tor TCP port to advertise. The default is the clearnet
**REPORT_TCP_PORT**, unless disabled or it is '0', otherwise
**TCP_PORT**. '0' disables publishing a Tor TCP port.
* **REPORT_SSL_PORT_TOR**
The SSL port to advertise for Tor. Defaults to **REPORT_SSL_PORT**,
unless it is '0', otherwise **SSL_PORT**. '0' disables publishing
the port.
The Tor SSL port to advertise. The default is the clearnet
**REPORT_SSL_PORT**, unless disabled or it is '0', otherwise
**SSL_PORT**. '0' disables publishing a Tor SSL port.
**NOTE**: Certificate-Authority signed certificates don't work over
Tor, so you should set **REPORT_SSL_PORT_TOR** to 0 if yours is not
self-signed.
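The defaulting for the two Tor ports can be expressed compactly. A
sketch under the stated rules (the helper is an assumption, not
ElectrumX's code); each argument is the environment variable's string
value, or None if unset::

    def published_tor_port(report_port_tor, report_port, base_port):
        '''Resolve one Tor port per the defaulting rules above.'''
        if report_port_tor is not None:
            port = report_port_tor
        elif report_port is not None and report_port != '0':
            port = report_port          # clearnet REPORT_*_PORT
        else:
            port = base_port            # fall back to TCP_PORT / SSL_PORT
        if port is None or port == '0':
            return None                 # '0' disables publishing
        return int(port)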
IRC
---
Use the following environment variables if you want to advertise
connectivity on IRC:
* **IRC**
Set to anything non-empty to advertise on IRC. ElectrumX connects
to IRC over the clear internet, always.
* **IRC_NICK**
The nick to use when connecting to IRC. The default is a hash of
**REPORT_HOST**. Either way a prefix will be prepended depending on
**COIN** and **NET**.
If **REPORT_HOST_TOR** is set, an additional connection to IRC
happens with '_tor' appended to **IRC_NICK**.
Cache
-----

36
docs/HOWTO.rst

@ -3,7 +3,7 @@ Prerequisites
=============
**ElectrumX** should run on any flavour of unix. I have run it
successfully on MaxOSX and DragonFlyBSD. It won't run out-of-the-box
successfully on MacOS and DragonFlyBSD. It won't run out-of-the-box
on Windows, but the changes required to make it do so should be
small - pull requests are welcome.
@ -46,7 +46,7 @@ recommend you install one of these and familiarise yourself with them.
The instructions below and sample run scripts assume `daemontools`;
adapting to `runit` should be trivial for someone used to either.
When building the database form the genesis block, ElectrumX has to
When building the database from the genesis block, ElectrumX has to
flush large quantities of data to disk and its DB. You will have a
better experience if the database directory is on an SSD than on an
HDD. Currently to around height 447,100 of the Bitcoin blockchain the
@ -66,7 +66,8 @@ was much worse.
You will need to install one of:
+ `plyvel <https://plyvel.readthedocs.io/en/latest/installation.html>`_ for LevelDB
+ `pyrocksdb <http://pyrocksdb.readthedocs.io/en/v0.4/installation.html>`_ for RocksDB
+ `python-rocksdb <https://pypi.python.org/pypi/python-rocksdb>`_ for RocksDB (`pip3 install python-rocksdb`)
+ `pyrocksdb <http://pyrocksdb.readthedocs.io/en/v0.4/installation.html>`_ for an unmaintained version that doesn't work with recent releases of RocksDB
Running
=======
@ -108,7 +109,7 @@ to at least 2,500.
Note that setting the limit in your shell does *NOT* affect ElectrumX
unless you are invoking ElectrumX directly from your shell. If you
are using `systemd`, you need to set it in the `.service` file (see
`samples/systemd/electrumx.service`_).
`contrib/systemd/electrumx.service`_).
Using daemontools
@ -136,7 +137,7 @@ you might do::
Then copy all the sample scripts from the ElectrumX source tree there::
cp -R /path/to/repo/electrumx/samples/daemontools ~/scripts/electrumx
cp -R /path/to/repo/electrumx/contrib/daemontools ~/scripts/electrumx
This copies 3 things: the top level server run script, a log/ directory
with the logger run script, an env/ directory.
@ -172,7 +173,7 @@ Using systemd
This repository contains a sample systemd unit file that you can use to
set up ElectrumX with systemd. Simply copy it to :code:`/etc/systemd/system`::
cp samples/systemd/electrumx.service /etc/systemd/system/
cp contrib/systemd/electrumx.service /etc/systemd/system/
The sample unit file assumes that the repository is located at
:code:`/home/electrumx/electrumx`. If that differs on your system, you need to
@ -199,6 +200,24 @@ minutes to flush cached data to disk during initial sync. You should
set TimeoutStopSec to *at least* 10 mins in your `.service` file.
Installing Python 3.6 under Ubuntu
----------------------------------
Many Ubuntu distributions have an incompatible Python version baked in.
Because of this, it is easier to install Python 3.6 rather than attempting
to update Python 3.5.2 to 3.5.3. See `contrib/python3.6/python-3.6.sh`_.
Installing on Raspberry Pi 3
----------------------------
To install on the Raspberry Pi 3 you will need to update to the "stretch" distribution.
See the full procedure in `contrib/raspberrypi3/install_electrumx.sh`_.
See also `contrib/raspberrypi3/run_electrumx.sh`_ for an easy way to configure and
launch ElectrumX.
Sync Progress
=============
@ -377,10 +396,13 @@ copy of your certificate and key in case you need to restore them.
.. _`ENVIRONMENT.rst`: https://github.com/kyuupichan/electrumx/blob/master/docs/ENVIRONMENT.rst
.. _`samples/systemd/electrumx.service`: https://github.com/kyuupichan/electrumx/blob/master/samples/systemd/electrumx.service
.. _`contrib/systemd/electrumx.service`: https://github.com/kyuupichan/electrumx/blob/master/contrib/systemd/electrumx.service
.. _`daemontools`: http://cr.yp.to/daemontools.html
.. _`runit`: http://smarden.org/runit/index.html
.. _`aiohttp`: https://pypi.python.org/pypi/aiohttp
.. _`pylru`: https://pypi.python.org/pypi/pylru
.. _`IRC`: https://pypi.python.org/pypi/irc
.. _`x11_hash`: https://pypi.python.org/pypi/x11_hash
.. _`contrib/python3.6/python-3.6.sh`: https://github.com/kyuupichan/electrumx/blob/master/contrib/python3.6/python-3.6.sh
.. _`contrib/raspberrypi3/install_electrumx.sh`: https://github.com/kyuupichan/electrumx/blob/master/contrib/raspberrypi3/install_electrumx.sh
.. _`contrib/raspberrypi3/run_electrumx.sh`: https://github.com/kyuupichan/electrumx/blob/master/contrib/raspberrypi3/run_electrumx.sh

56
docs/PEER_DISCOVERY.rst

@ -72,11 +72,12 @@ Maintaining the Peer Database
In order to keep its peer database up-to-date and fresh, if some time
has passed since the last successful connection to a peer, an Electrum
server should make an attempt to connect, choosing either the TCP or
SSL port. On connecting it should issue **server.peers.subscribe**
and **server.features** RPC calls to collect information about the
server and its peers, and if it is the first time connecting to this
peer, a **server.add_peer** call to advertise itself. Once this is
done and replies received it should terminate the connection.
SSL port. On connecting it should issue **server.peers.subscribe**,
**blockchain.headers.subscribe**, and **server.features** RPC calls to
collect information about the server and its peers. If the peer seems
to not know of you, you can issue a **server.add_peer** call to
advertise yourself. Once this is done and replies received it should
terminate the connection.
The peer database should view information obtained from an outgoing
connection as authoritative, and prefer it to information obtained
@ -84,13 +85,12 @@ from any other source.
On connecting, a server should confirm the peer is serving the same
network, ideally via the genesis block hash of the **server.features**
RPC call below. If the peer does not implement that call, perhaps
instead check the **blockchain.headers.subscribe** RPC call returns a
peer block height within a small number of the expected value. If a
peer is on the wrong network it should never be advertised to clients
or other peers. Such invalid peers should perhaps be remembered for a
short time to prevent redundant revalidation if other peers persist in
advertising them, and later forgotten.
RPC call below. Also the height reported by the peer should be within
a small number of the expected value. If a peer is on the wrong
network it should never be advertised to clients or other peers. Such
invalid peers should perhaps be remembered for a short time to prevent
redundant revalidation if other peers persist in advertising them, and
later forgotten.
If a connection attempt fails, subsequent reconnection attempts should
follow some kind of exponential backoff.
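One maintenance pass could look like the following asyncio sketch.
The ``send_request`` callable, the result dictionary keys and the
height tolerance are illustrative assumptions, not part of the
protocol::

    HEIGHT_TOLERANCE = 6      # "a small number" of blocks; value assumed

    async def refresh_peer(send_request, my_genesis_hash, my_height,
                           my_features=None):
        '''One maintenance pass over an open connection.  send_request is
        an async callable issuing one RPC request; returns the peer's
        peer list, or None if verification fails.'''
        features = await send_request('server.features')
        header = await send_request('blockchain.headers.subscribe')
        peers = await send_request('server.peers.subscribe')
        # Confirm the same network: genesis hash if available, and height
        if features.get('genesis_hash') != my_genesis_hash:
            return None
        if abs(header['block_height'] - my_height) > HEIGHT_TOLERANCE:
            return None
        if my_features is not None:      # first contact: advertise ourselves
            await send_request('server.add_peer', [my_features])
        return peers

Injecting the request function keeps the sketch transport-agnostic; a
real server would itself open the TCP or SSL port and terminate the
connection once the replies are received.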
@ -200,3 +200,35 @@ the hard-coded peer list used to seed this process should suffice.
Any peer on IRC will report other peers on IRC, and so if any one of
them is known to any single peer implementing this protocol, they will
all become known to all peers quite rapidly.
Notes to Implementors
-----------------------
* it is very important to only accept peers that appear to be on the
same network. At a minimum the genesis hash should be compared (if
the peer supports the *server.features* RPC call), and the peer's
reported height should be within a few blocks of your own server's
height.
* care should be taken with the *add_peer* call. Consider only
accepting it once per connection. Clearnet peer requests should
check the peer resolves to the requesting IP address, to prevent
attackers from being able to trigger arbitrary outgoing connections
from your server. This doesn't work for onion peers so they should
be rate-limited.
* it should be possible for a peer to change their port assignments -
presumably connecting to the old ports to perform checks will not
work.
* peer host names should be checked for validity before accepting
them; and *localhost* should probably be rejected. If it is an IP
address it should be a normal public one (not private, multicast or
unspecified).
* you should limit the number of new peers accepted from any single
source to at most a handful, to limit the effectiveness of malicious
peers wanting to trigger arbitrary outgoing connections or fill your
peer tables with junk data.
* in the response to *server.peers.subscribe* calls, consider limiting
the number of peers on similar IP subnets to protect against sybil
attacks, and in the case of onion servers the total returned.
* you should not advertise a peer's IP address if it also advertises a
hostname (avoiding duplicates).
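A sketch of the host sanity checks from the list above, using only the
standard library; the exact policy shown is an assumption::

    from ipaddress import ip_address

    def acceptable_peer_host(host):
        '''Reject localhost and non-public IP addresses.'''
        if host.lower() in ('localhost', 'localhost.'):
            return False
        try:
            addr = ip_address(host)
        except ValueError:
            # Not an IP literal: require a plausible dotted hostname
            parts = host.rstrip('.').split('.')
            return len(parts) > 1 and all(parts)
        return not (addr.is_private or addr.is_multicast
                    or addr.is_unspecified or addr.is_loopback)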

106
docs/PROTOCOL.rst

@ -5,6 +5,8 @@ Electrum Protocol
Until now there was no written specification of the Electrum protocol
that I am aware of; this document is an attempt to fill that gap. It
is intended to be a reference for client and server authors alike.
[Since writing this I learnt there has been a skeleton protocol
description on docs.github.io].
I have attempted to ensure what is written is correct for the three
known server implementations: electrum-server, jelectrum and
@ -61,8 +63,8 @@ Protocol negotiation is not implemented in any client or server at
present to the best of my knowledge, so care is needed to ensure
current clients and servers continue to operate as expected.
Protocol versions are denoted by [major_number, minor_number] pairs,
for example protocol version 1.15 is [1, 15] as a pair.
Protocol versions are denoted by "m.n" strings, where *m* is the major
version number and *n* the minor version number. For example: "1.5".
A party to a connection will speak all protocol versions in a range,
say from `protocol_min` to `protocol_max`. This min and max may be
@ -189,7 +191,7 @@ Return the unconfirmed transactions of a bitcoin address.
transaction is a dictionary with keys *height* , *tx_hash* and
*fee*. *tx_hash* the transaction hash in hexadecimal, *height* is
`0` if all inputs are confirmed, and `-1` otherwise, and *fee* is
the transaction fee in coin units.
the transaction fee in minimum coin units as an integer.
**Response Examples**
@ -298,7 +300,7 @@ blockchain.block.get_header
Return the *deserialized header* [2]_ of the block at the given height.
blockchain.block.get_chunk(**height**)
blockchain.block.get_header(**height**)
**height**
@ -324,10 +326,10 @@ blockchain.block.get_chunk
==========================
Return a concatenated chunk of block headers. A chunk consists of a
fixed number of block headers over at the end of which difficulty is
retargeted.
fixed number of block headers over which difficulty is constant, and
at the end of which difficulty is retargeted.
So in the case of Bitcoin a chunk is 2,016 headers, each of 80 bytes,
In the case of Bitcoin a chunk is 2,016 headers, each of 80 bytes,
and chunk 5 is the block headers from height 10,080 to 12,095
inclusive. When encoded as hexadecimal, the response string is twice
as long, so for Bitcoin it is 322,560 bytes long, making this a
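The chunk arithmetic above, spelled out with the values stated in the
text::

    CHUNK_SIZE = 2016                 # Bitcoin headers per difficulty period
    HEADER_SIZE = 80                  # bytes per Bitcoin header

    chunk_index = 5
    first_height = chunk_index * CHUNK_SIZE             # 10,080
    last_height = first_height + CHUNK_SIZE - 1         # 12,095
    hex_response_len = CHUNK_SIZE * HEADER_SIZE * 2     # 322,560 characters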
@ -558,7 +560,7 @@ deprecated.
**Response**
A Base58 address string, or *null*. If the transaction doesn't
exist, the index is out of range, or the output is not paid to and
exist, the index is out of range, or the output is not paid to an
address, *null* must be returned. If the output is spent *null* may
be returned.
@ -600,7 +602,7 @@ subscription and the server must send no notifications.
The first element is the IP address, the second is the host name
(which might also be an IP address), and the third is a list of
server features. Each feature starts with a letter. 'v'
indicates the server minimum protocol version, 'p' its pruning limit
indicates the server maximum protocol version, 'p' its pruning limit
and is omitted if it does not prune, 't' is the TCP port number, and
's' is the SSL port number. If a port is not given for 's' or 't'
the default port for the coin network is implied. If 's' or 't' is
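A parsing sketch for these feature strings (an assumed helper, not
part of any implementation); ``default_ports`` maps the letters 't'
and 's' to the coin's default port strings::

    def parse_feature_strings(features, default_ports):
        '''Parse items like ['v1.0', 'p10000', 't', 's995'].'''
        info = {'version': None, 'pruning': None,
                'tcp_port': None, 'ssl_port': None}
        for item in features:
            letter, value = item[0], item[1:]
            if letter == 'v':
                info['version'] = value
            elif letter == 'p':
                info['pruning'] = int(value)
            elif letter in 'ts':
                key = 'tcp_port' if letter == 't' else 'ssl_port'
                info[key] = int(value) if value else int(default_ports[letter])
        return info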
@ -615,7 +617,7 @@ following changes:
* improved semantics of `server.version` to aid protocol negotiation
* deprecated methods `blockchain.address.get_proof`,
'blockchain.utxo.get_address' and `blockchain.numblocks.subscribe`
`blockchain.utxo.get_address` and `blockchain.numblocks.subscribe`
have been removed.
* method `blockchain.transaction.get` no longer takes a *height*
argument
@ -631,7 +633,7 @@ server.version
Identify the client and inform the server the range of understood
protocol versions.
server.version(**client_name**, **protocol_version** = ((1, 1), (1, 1)))
server.version(**client_name**, **protocol_version** = ["1.1", "1.1"])
**client_name**
@ -639,31 +641,28 @@ protocol versions.
**protocol_version**
Optional with default value ((1, 1), (1, 1)).
Optional with default value ["1.1", "1.1"].
It must be a pair [`protocol_min`, `protocol_max`], each of which is
itself a [major_version, minor_version] pair.
If a string was passed it should be interpreted as `protocol_min` and
`protocol_max` both being [1, 0].
a string.
The server should use the highest protocol version both support:
protocol_version_to_use = min(client.protocol_max, server.protocol_max)
If this is below
If this is below the value
max(client.protocol_min, server.protocol_min)
there is no protocol version in common and the server must close the
connection. Otherwise it should send a response appropriate for that
protocol version.
then there is no protocol version in common and the server must close
the connection. Otherwise it should send a response appropriate for
that protocol version.
**Response**
A pair
A string
[identifying_string, protocol_version]
"m.n"
identifying the server and the protocol version that will be used
for future communication.
@ -672,7 +671,7 @@ protocol version.
::
server.version('2.7.11', ((1, 0), (2, 0)))
server.version('2.7.11', ["1.0", "2.0"])
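The negotiation rule above as a sketch (the helper is assumed;
versions are "m.n" strings)::

    def negotiate_version(client_min, client_max, server_min, server_max):
        '''Return the version to use, or None to close the connection.'''
        as_pair = lambda s: tuple(int(part) for part in s.split('.'))
        high = min(as_pair(client_max), as_pair(server_max))
        low = max(as_pair(client_min), as_pair(server_min))
        if high < low:
            return None                   # no protocol version in common
        return '{}.{}'.format(*high)

    negotiate_version('1.0', '2.0', '1.0', '1.1')   # -> '1.1'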
server.add_peer
@ -704,37 +703,62 @@ Get a list of features and services supported by the server.
The following features MUST be reported by the server. Additional
key-value pairs may be returned.
* **hosts**
* **hosts**
A dictionary, keyed by host name, that this server can be reached
at. Normally this will only have a single entry; other entries can
be used in case there are other connection routes (e.g. Tor).
The value for a host is itself a dictionary, with the following
optional keys:
* **ssl_port**
An integer. Omit or set to *null* if SSL connectivity is not
provided.
* **tcp_port**
An integer. Omit or set to *null* if TCP connectivity is not
provided.
A server should ignore information provided about any host other
than the one it connected to.
* **genesis_hash**
A dictionary of host names the server can be reached at. Each
value is a dictionary with keys "ssl_port" and "tcp_port" at which
the given host can be reached. If there is no open port for a
transport, its value should be *null*.
The hash of the genesis block. This is used to detect if a peer is
connected to one serving a different network.
* **server_version**
* **server_version**
The same identifying string as returned in response to *server.version*.
A string that identifies the server software. It should be the same as
the response to the **server.version** RPC call.
* **protocol_version**
* **protocol_max**
* **protocol_min**
A pair [`protocol_min`, `protocol_max`] of the protocols supported
by the server, each of which is itself a [major_version,
minor_version] pair.
Strings that are the minimum and maximum Electrum protocol versions
this server speaks. The maximum value should be the same as what
would suffix the letter **v** in the IRC real name. Example: "1.1".
* **pruning**
* **pruning**
The history pruning limit of the server as an integer. If the
server does not prune return *null*.
An integer, the pruning limit. Omit or set to *null* if there is no
pruning limit. Should be the same as what would suffix the letter
**p** in the IRC real name.
**Example Response**
::
{
"server_version": "ElectrumX 0.10.14",
"protocol_version": [[1, 0], [1, 1]],
"hosts": {"14.3.140.101": {"ssl_port": 50002, "tcp_port": 50001}},
"pruning": null
"genesis_hash": "000000000933ea01ad0ee984209779baaec3ced90fa3f408719526f8d77f4943",
"hosts": {"14.3.140.101": {"tcp_port": 51001, "ssl_port": 51002}},
"protocol_max": "1.0",
"protocol_min": "1.0",
"pruning": null,
"server_version": "ElectrumX 1.0.1"
}
.. _JSON RPC 1.0: http://json-rpc.org/wiki/specification

2
electrumx_server.py

@ -33,7 +33,7 @@ def main_loop():
raise RuntimeError('Python >= 3.5.3 is required to run ElectrumX')
if os.geteuid() == 0:
raise RuntimeError('DO NOT RUN AS ROOT! Create an unpriveleged user '
raise RuntimeError('DO NOT RUN AS ROOT! Create an unprivileged user '
'account and use that')
loop = asyncio.get_event_loop()

399
lib/coins.py

@ -30,6 +30,7 @@ Anything coin-specific should go in this file and be subclassed where
necessary for appropriate handling.
'''
from collections import namedtuple
import re
import struct
from decimal import Decimal
@ -38,7 +39,14 @@ from hashlib import sha256
import lib.util as util
from lib.hash import Base58, hash160, double_sha256, hash_to_str
from lib.script import ScriptPubKey
from lib.tx import Deserializer, DeserializerSegWit
from lib.tx import Deserializer, DeserializerSegWit, DeserializerAuxPow, \
DeserializerZcash, DeserializerTxTime, DeserializerReddcoin
from server.block_processor import BlockProcessor
from server.daemon import Daemon, DashDaemon, LegacyRPCDaemon
from server.session import ElectrumX, DashElectrumX
Block = namedtuple("Block", "header transactions")
class CoinError(Exception):
@ -53,10 +61,18 @@ class Coin(object):
RPC_URL_REGEX = re.compile('.+@(\[[0-9a-fA-F:]+\]|[^:]+)(:[0-9]+)?')
VALUE_PER_COIN = 100000000
CHUNK_SIZE = 2016
HASHX_LEN = 11
BASIC_HEADER_SIZE = 80
STATIC_BLOCK_HEADERS = True
SESSIONCLS = ElectrumX
DESERIALIZER = Deserializer
DAEMON = Daemon
BLOCK_PROCESSOR = BlockProcessor
XPUB_VERBYTES = bytes('????', 'utf-8')
XPRV_VERBYTES = bytes('????', 'utf-8')
IRC_PREFIX = None
IRC_SERVER = "irc.freenode.net"
IRC_PORT = 6667
HASHX_LEN = 11
# Peer discovery
PEER_DEFAULT_PORTS = {'t': '50001', 's': '50002'}
PEERS = []
@ -66,12 +82,14 @@ class Coin(object):
'''Return a coin class given name and network.
Raise an exception if unrecognised.'''
req_attrs = ('TX_COUNT', 'TX_COUNT_HEIGHT', 'TX_PER_BLOCK',
'IRC_CHANNEL')
req_attrs = ['TX_COUNT', 'TX_COUNT_HEIGHT', 'TX_PER_BLOCK']
for coin in util.subclasses(Coin):
if (coin.NAME.lower() == name.lower() and
coin.NET.lower() == net.lower()):
missing = [attr for attr in req_attrs
coin_req_attrs = req_attrs.copy()
if coin.IRC_PREFIX is not None:
coin_req_attrs.append('IRC_CHANNEL')
missing = [attr for attr in coin_req_attrs
if not hasattr(coin, attr)]
if missing:
raise CoinError('coin {} missing {} attributes'
@ -138,7 +156,7 @@ class Coin(object):
def lookup_xverbytes(verbytes):
'''Return a (is_xpub, coin_class) pair given xpub/xprv verbytes.'''
# Order means BTC testnet will override NMC testnet
for coin in Coin.coin_classes():
for coin in util.subclasses(Coin):
if verbytes == coin.XPUB_VERBYTES:
return True, coin
if verbytes == coin.XPRV_VERBYTES:
@ -165,7 +183,7 @@ class Coin(object):
def P2SH_address_from_hash160(cls, hash160):
'''Return a coin address given a hash160.'''
assert len(hash160) == 20
return Base58.encode_check(cls.P2SH_VERBYTE + hash160)
return Base58.encode_check(cls.P2SH_VERBYTES[0] + hash160)
@classmethod
def multisig_address(cls, m, pubkeys):
@ -208,13 +226,13 @@ class Coin(object):
if verbyte == cls.P2PKH_VERBYTE:
return ScriptPubKey.P2PKH_script(hash_bytes)
if verbyte == cls.P2SH_VERBYTE:
if verbyte in cls.P2SH_VERBYTES:
return ScriptPubKey.P2SH_script(hash_bytes)
raise CoinError('invalid address: {}'.format(address))
@classmethod
def prvkey_WIF(cls, privkey_bytes, compressed):
def privkey_WIF(cls, privkey_bytes, compressed):
'''Return the private key encoded in Wallet Import Format.'''
payload = bytearray(cls.WIF_BYTE) + privkey_bytes
if compressed:
@ -232,29 +250,32 @@ class Coin(object):
return header[4:36]
@classmethod
def header_offset(cls, height):
def static_header_offset(cls, height):
'''Given a header height return its offset in the headers file.
If header sizes change at some point, this is the only code
that needs updating.'''
return height * 80
assert cls.STATIC_BLOCK_HEADERS
return height * cls.BASIC_HEADER_SIZE
@classmethod
def header_len(cls, height):
def static_header_len(cls, height):
'''Given a header height return its length.'''
return cls.header_offset(height + 1) - cls.header_offset(height)
return cls.static_header_offset(height + 1) \
- cls.static_header_offset(height)
@classmethod
def block_header(cls, block, height):
'''Returns the block header given a block and its height.'''
return block[:cls.header_len(height)]
return block[:cls.static_header_len(height)]
@classmethod
def block_txs(cls, block, height):
'''Returns a list of (deserialized_tx, tx_hash) pairs given a
def block_full(cls, block, height):
'''Returns (header, [(deserialized_tx, tx_hash), ...]) given a
block and its height.'''
deserializer = cls.deserializer()
return deserializer(block[cls.header_len(height):]).read_block()
header = cls.block_header(block, height)
txs = cls.DESERIALIZER(block[len(header):]).read_tx_block()
return Block(header, txs)
@classmethod
def decimal_value(cls, value):
@ -280,9 +301,21 @@ class Coin(object):
'nonce': nonce,
}
class AuxPowMixin(object):
STATIC_BLOCK_HEADERS = False
DESERIALIZER = DeserializerAuxPow
@classmethod
def deserializer(cls):
return Deserializer
def header_hash(cls, header):
'''Given a header return hash'''
return double_sha256(header[:cls.BASIC_HEADER_SIZE])
@classmethod
def block_header(cls, block, height):
'''Return the AuxPow block header bytes'''
block = cls.DESERIALIZER(block)
return block.read_header(height, cls.BASIC_HEADER_SIZE)
class Bitcoin(Coin):
@ -292,47 +325,49 @@ class Bitcoin(Coin):
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("00")
P2SH_VERBYTE = bytes.fromhex("05")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('000000000019d6689c085ae165831e93'
'4ff763ae46a2a6c172b3f1b60a8ce26f')
TX_COUNT = 156335304
TX_COUNT_HEIGHT = 429972
TX_COUNT = 217380620
TX_COUNT_HEIGHT = 464000
TX_PER_BLOCK = 1800
IRC_PREFIX = "E_"
IRC_CHANNEL = "#electrum"
RPC_PORT = 8332
PEERS = [
'btc.smsys.me s995',
'ca6ulp2j2mpsft3y.onion s t',
'electrum.be s t',
'electrum.trouth.net p10000 s t',
'ELECTRUM.not.fyi p1000 s t',
'electrum.vom-stausee.de s t',
'electrum3.hachre.de p10000 s t',
'electrum.hsmiths.com s t',
'erbium1.sytes.net s t',
'fdkbwjykvl2f3hup.onion p10000 s t',
'h.1209k.com p10000 s t',
'fdkhv2bb7hqel2e7.onion s t',
'h.1209k.com s t',
'helicarrier.bauerj.eu s t',
'hsmiths4fyqlw5xw.onion s t',
'ozahtqwp25chjdjd.onion s t',
'us11.einfachmalnettsein.de s t',
'ELEX01.blackpole.online s t',
'electrum_abc.criptolayer.net s50012',
]
class BitcoinSegwit(Bitcoin):
NET = "bitcoin-segwit"
DESERIALIZER = DeserializerSegWit
class BitcoinTestnet(Bitcoin):
SHORTNAME = "XTN"
NET = "testnet"
IRC_PREFIX = None
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTE = bytes.fromhex("c4")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('000000000933ea01ad0ee984209779ba'
'aec3ced90fa3f408719526f8d77f4943')
REORG_LIMIT = 4000
REORG_LIMIT = 8000
TX_COUNT = 12242438
TX_COUNT_HEIGHT = 1035428
TX_PER_BLOCK = 21
@ -357,22 +392,19 @@ class BitcoinTestnetSegWit(BitcoinTestnet):
bitcoind on testnet, you must use this class as your "COIN".
'''
NET = "testnet-segwit"
@classmethod
def deserializer(cls):
return DeserializerSegWit
DESERIALIZER = DeserializerSegWit
class BitcoinNolnet(Bitcoin):
'''Bitcoin Unlimited nolimit testnet.'''
NET = "nolnet"
GENESIS_HASH = ('00000000e752e935119102b142b5c27a'
'346a023532a42edcf7c8ffd0a22206e9')
GENESIS_HASH = ('0000000057e31bd2066c939a63b7b862'
'3bd0f10d8c001304bdfc1a7902ae6d35')
REORG_LIMIT = 8000
TX_COUNT = 195106
TX_COUNT_HEIGHT = 24920
TX_PER_BLOCK = 8
TX_COUNT = 583589
TX_COUNT_HEIGHT = 8617
TX_PER_BLOCK = 50
IRC_PREFIX = "EN_"
RPC_PORT = 28332
PEER_DEFAULT_PORTS = {'t': '52001', 's': '52002'}
@ -385,43 +417,104 @@ class Litecoin(Coin):
NAME = "Litecoin"
SHORTNAME = "LTC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("019da462")
XPRV_VERBYTES = bytes.fromhex("019d9cfe")
XPUB_VERBYTES = bytes.fromhex("019d9cfe")
XPRV_VERBYTES = bytes.fromhex("019da462")
P2PKH_VERBYTE = bytes.fromhex("30")
P2SH_VERBYTE = bytes.fromhex("05")
P2SH_VERBYTES = [bytes.fromhex("32"), bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("b0")
GENESIS_HASH = ('12a765e31ffd4059bada1e25190f6e98'
'c99d9714d334efa41a195a7e7e04bfe2')
DESERIALIZER = DeserializerSegWit
TX_COUNT = 8908766
TX_COUNT_HEIGHT = 1105256
TX_PER_BLOCK = 10
IRC_PREFIX = "EL_"
IRC_CHANNEL = "#electrum-ltc"
RPC_PORT = 9332
REORG_LIMIT = 800
PEERS = [
'elec.luggs.co s444',
'electrum-ltc.bysh.me s t',
'electrum-ltc.ddns.net s t',
'electrum.cryptomachine.com p1000 s t',
'electrum.ltc.xurious.com s t',
'eywr5eubdbbe2laq.onion s50008 t50007',
]
class LitecoinTestnet(Litecoin):
SHORTNAME = "XLT"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("0436f6e1")
XPRV_VERBYTES = bytes.fromhex("0436ef7d")
XPUB_VERBYTES = bytes.fromhex("0436ef7d")
XPRV_VERBYTES = bytes.fromhex("0436f6e1")
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTE = bytes.fromhex("c4")
P2SH_VERBYTES = [bytes.fromhex("3a"), bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('f5ae71e26c74beacc88382716aced69c'
'ddf3dffff24f384e1808905e0188f68f')
GENESIS_HASH = ('4966625a4b2851d9fdee139e56211a0d'
'88575f59ed816ff5e6a63deb4e3e29a0')
TX_COUNT = 21772
TX_COUNT_HEIGHT = 20800
TX_PER_BLOCK = 2
RPC_PORT = 19332
REORG_LIMIT = 4000
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
PEERS = [
'electrum-ltc.bysh.me s t',
'electrum.ltc.xurious.com s t',
]
class Viacoin(AuxPowMixin, Coin):
NAME="Viacoin"
SHORTNAME = "VIA"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("47")
P2SH_VERBYTES = [bytes.fromhex("21")]
WIF_BYTE = bytes.fromhex("c7")
GENESIS_HASH = ('4e9b54001f9976049830128ec0331515'
'eaabe35a70970d79971da1539a400ba1')
TX_COUNT = 113638
TX_COUNT_HEIGHT = 3473674
TX_PER_BLOCK = 30
IRC_PREFIX = "E_"
IRC_CHANNEL="#vialectrum"
RPC_PORT = 5222
REORG_LIMIT = 5000
PEERS = [
'vialectrum.bitops.me s t',
'server.vialectrum.org s t',
'vialectrum.viacoin.net s t',
'viax1.bitops.me s t',
]
class ViacoinTestnet(Viacoin):
SHORTNAME = "TVI"
NET = "testnet"
P2PKH_VERBYTE = bytes.fromhex("7f")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ff")
GENESIS_HASH = ('00000007199508e34a9ff81e6ec0c477'
'a4cccff2a4767a8eee39c11db367b008')
RPC_PORT = 25222
REORG_LIMIT = 2500
PEER_DEFAULT_PORTS = {'t': '51001', 's': '51002'}
PEERS = [
'vialectrum.bysh.me s t',
]
class ViacoinTestnetSegWit(ViacoinTestnet):
NET = "testnet-segwit"
DESERIALIZER = DeserializerSegWit
# Source: namecoin.org
class Namecoin(Coin):
class Namecoin(AuxPowMixin, Coin):
NAME = "Namecoin"
SHORTNAME = "NMC"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("d7dd6370")
XPRV_VERBYTES = bytes.fromhex("d7dc6e31")
P2PKH_VERBYTE = bytes.fromhex("34")
P2SH_VERBYTE = bytes.fromhex("0d")
P2SH_VERBYTES = [bytes.fromhex("0d")]
WIF_BYTE = bytes.fromhex("e4")
GENESIS_HASH = ('000000000062b72c5e2ceb45fbc8587e'
'807c155b0da735e6483dfba2f0a9c770')
@ -436,25 +529,21 @@ class NamecoinTestnet(Namecoin):
NAME = "Namecoin"
SHORTNAME = "XNM"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTE = bytes.fromhex("c4")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('00000007199508e34a9ff81e6ec0c477'
'a4cccff2a4767a8eee39c11db367b008')
# For DOGE there is disagreement across sites like bip32.org and
# pycoin. Taken from bip32.org and bitmerchant on github
class Dogecoin(Coin):
class Dogecoin(AuxPowMixin, Coin):
NAME = "Dogecoin"
SHORTNAME = "DOGE"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("02facafd")
XPRV_VERBYTES = bytes.fromhex("02fac398")
P2PKH_VERBYTE = bytes.fromhex("1e")
P2SH_VERBYTE = bytes.fromhex("16")
P2SH_VERBYTES = [bytes.fromhex("16")]
WIF_BYTE = bytes.fromhex("9e")
GENESIS_HASH = ('1a91e3dace36e2be3bf030a65679fe82'
'1aa1d6ef92e7c9902eb318182c355691')
@ -470,10 +559,8 @@ class DogecoinTestnet(Dogecoin):
NAME = "Dogecoin"
SHORTNAME = "XDT"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("0432a9a8")
XPRV_VERBYTES = bytes.fromhex("0432a243")
P2PKH_VERBYTE = bytes.fromhex("71")
P2SH_VERBYTE = bytes.fromhex("c4")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("f1")
GENESIS_HASH = ('bb0a78264637406b6360aad926284d54'
'4d7049f45189db5664f3c4d07350559e')
@ -489,7 +576,7 @@ class Dash(Coin):
GENESIS_HASH = ('00000ffd590b1485b3caadc19b22e637'
'9c733355108f107a430458cdf3407ab6')
P2PKH_VERBYTE = bytes.fromhex("4c")
P2SH_VERBYTE = bytes.fromhex("10")
P2SH_VERBYTES = [bytes.fromhex("10")]
WIF_BYTE = bytes.fromhex("cc")
TX_COUNT_HEIGHT = 569399
TX_COUNT = 2157510
@ -505,6 +592,8 @@ class Dash(Coin):
'electrum.dash.siampm.com s t',
'wl4sfwq2hwxnodof.onion s t',
]
SESSIONCLS = DashElectrumX
DAEMON = DashDaemon
@classmethod
def header_hash(cls, header):
@ -521,7 +610,7 @@ class DashTestnet(Dash):
GENESIS_HASH = ('00000bafbc94add76cb75e2ec9289483'
'7288a481e5c005f6563d91623bf8bc2c')
P2PKH_VERBYTE = bytes.fromhex("8c")
P2SH_VERBYTE = bytes.fromhex("13")
P2SH_VERBYTES = [bytes.fromhex("13")]
WIF_BYTE = bytes.fromhex("ef")
TX_COUNT_HEIGHT = 101619
TX_COUNT = 132681
@ -534,14 +623,12 @@ class DashTestnet(Dash):
]
class Argentum(Coin):
class Argentum(AuxPowMixin, Coin):
NAME = "Argentum"
SHORTNAME = "ARG"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("17")
P2SH_VERBYTE = bytes.fromhex("05")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("97")
GENESIS_HASH = ('88c667bc63167685e4e4da058fffdfe8'
'e007e5abffd6855de52ad59df7bb0bb2')
@ -556,10 +643,8 @@ class Argentum(Coin):
class ArgentumTestnet(Argentum):
SHORTNAME = "XRG"
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTE = bytes.fromhex("c4")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
REORG_LIMIT = 2000
@ -568,13 +653,12 @@ class DigiByte(Coin):
NAME = "DigiByte"
SHORTNAME = "DGB"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("1E")
P2SH_VERBYTE = bytes.fromhex("05")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('7497ea1b465eb39f1c8f507bc877078f'
'e016d6fcb6dfad3a64c98dcc6e1e8496')
DESERIALIZER = DeserializerSegWit
TX_COUNT = 1046018
TX_COUNT_HEIGHT = 1435000
TX_PER_BLOCK = 1000
@ -585,10 +669,8 @@ class DigiByte(Coin):
class DigiByteTestnet(DigiByte):
NET = "testnet"
XPUB_VERBYTES = bytes.fromhex("043587cf")
XPRV_VERBYTES = bytes.fromhex("04358394")
P2PKH_VERBYTE = bytes.fromhex("6f")
P2SH_VERBYTE = bytes.fromhex("c4")
P2SH_VERBYTES = [bytes.fromhex("c4")]
WIF_BYTE = bytes.fromhex("ef")
GENESIS_HASH = ('b5dca8039e300198e5fe7cd23bdd1728'
'e2a444af34c447dbd0916fa3430a68c2')
@ -602,13 +684,12 @@ class FairCoin(Coin):
NAME = "FairCoin"
SHORTNAME = "FAIR"
NET = "mainnet"
XPUB_VERBYTES = bytes.fromhex("0488b21e")
XPRV_VERBYTES = bytes.fromhex("0488ade4")
P2PKH_VERBYTE = bytes.fromhex("5f")
P2SH_VERBYTE = bytes.fromhex("24")
P2SH_VERBYTES = [bytes.fromhex("24")]
WIF_BYTE = bytes.fromhex("df")
GENESIS_HASH=('1f701f2b8de1339dc0ec908f3fb6e9b0'
'b870b6f20ba893e120427e42bbc048d7')
GENESIS_HASH = ('1f701f2b8de1339dc0ec908f3fb6e9b0'
'b870b6f20ba893e120427e42bbc048d7')
BASIC_HEADER_SIZE = 108
TX_COUNT = 1000
TX_COUNT_HEIGHT = 1000
TX_PER_BLOCK = 1
@ -622,22 +703,14 @@ class FairCoin(Coin):
]
@classmethod
def header_offset(cls, height):
'''Given a header height return its offset in the headers file.
If header sizes change at some point, this is the only code
that needs updating.'''
return height * 108
@classmethod
def block_txs(cls, block, height):
'''Returns a list of (deserialized_tx, tx_hash) pairs given a
def block_full(cls, block, height):
'''Returns (header, [(deserialized_tx, tx_hash), ...]) given a
block and its height.'''
if height == 0:
return []
deserializer = cls.deserializer()
return deserializer(block[cls.header_len(height):]).read_block()
if height > 0:
return super().block_full(block, height)
else:
return Block(cls.block_header(block, height), [])
@classmethod
def electrum_header(cls, header, height):
@ -652,3 +725,135 @@ class FairCoin(Coin):
'timestamp': timestamp,
'creatorId': creatorId,
}
class Zcash(Coin):
NAME = "Zcash"
SHORTNAME = "ZEC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("1CB8")
P2SH_VERBYTES = [bytes.fromhex("1CBD")]
WIF_BYTE = bytes.fromhex("80")
GENESIS_HASH = ('00040fe8ec8471911baa1db1266ea15d'
'd06b4a8a5c453883c000b031973dce08')
STATIC_BLOCK_HEADERS = False
BASIC_HEADER_SIZE = 140 # Excluding Equihash solution
DESERIALIZER = DeserializerZcash
TX_COUNT = 329196
TX_COUNT_HEIGHT = 68379
TX_PER_BLOCK = 5
IRC_PREFIX = "E_"
IRC_CHANNEL = "#electrum-zcash"
RPC_PORT = 8232
REORG_LIMIT = 800
@classmethod
def electrum_header(cls, header, height):
version, = struct.unpack('<I', header[:4])
timestamp, bits = struct.unpack('<II', header[100:108])
return {
'block_height': height,
'version': version,
'prev_block_hash': hash_to_str(header[4:36]),
'merkle_root': hash_to_str(header[36:68]),
'timestamp': timestamp,
'bits': bits,
'nonce': hash_to_str(header[108:140]),
}
@classmethod
def block_header(cls, block, height):
'''Return the block header bytes'''
block = cls.DESERIALIZER(block)
return block.read_header(height, cls.BASIC_HEADER_SIZE)
class Einsteinium(Coin):
NAME = "Einsteinium"
SHORTNAME = "EMC2"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("21")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("a1")
GENESIS_HASH = ('4e56204bb7b8ac06f860ff1c845f03f9'
'84303b5b97eb7b42868f714611aed94b')
TX_COUNT = 2087559
TX_COUNT_HEIGHT = 1358517
TX_PER_BLOCK = 2
IRC_PREFIX = "E_"
IRC_CHANNEL = "#electrum-emc2"
RPC_PORT = 41879
REORG_LIMIT = 2000
class Blackcoin(Coin):
NAME = "Blackcoin"
SHORTNAME = "BLK"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("19")
P2SH_VERBYTES = [bytes.fromhex("55")]
WIF_BYTE = bytes.fromhex("99")
GENESIS_HASH = ('000001faef25dec4fbcf906e6242621d'
'f2c183bf232f263d0ba5b101911e4563')
DESERIALIZER = DeserializerTxTime
DAEMON = LegacyRPCDaemon
TX_COUNT = 4594999
TX_COUNT_HEIGHT = 1667070
TX_PER_BLOCK = 3
IRC_PREFIX = "E_"
IRC_CHANNEL = "#electrum-blk"
RPC_PORT = 15715
REORG_LIMIT = 5000
HEADER_HASH = None
@classmethod
def header_hash(cls, header):
'''Given a header return the hash.'''
if cls.HEADER_HASH is None:
import scrypt
cls.HEADER_HASH = lambda x: scrypt.hash(x, x, 1024, 1, 1, 32)
version, = struct.unpack('<I', header[:4])
if version > 6:
return super().header_hash(header)
else:
return cls.HEADER_HASH(header)
class Peercoin(Coin):
NAME = "Peercoin"
SHORTNAME = "PPC"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("37")
P2SH_VERBYTES = [bytes.fromhex("75")]
WIF_BYTE = bytes.fromhex("b7")
GENESIS_HASH = ('0000000032fe677166d54963b62a4677'
'd8957e87c508eaa4fd7eb1c880cd27e3')
DESERIALIZER = DeserializerTxTime
DAEMON = LegacyRPCDaemon
TX_COUNT = 1207356
TX_COUNT_HEIGHT = 306425
TX_PER_BLOCK = 4
IRC_PREFIX = "E_"
IRC_CHANNEL = "#electrum-ppc"
RPC_PORT = 9902
REORG_LIMIT = 5000
class Reddcoin(Coin):
NAME = "Reddcoin"
SHORTNAME = "RDD"
NET = "mainnet"
P2PKH_VERBYTE = bytes.fromhex("3d")
P2SH_VERBYTES = [bytes.fromhex("05")]
WIF_BYTE = bytes.fromhex("bd")
GENESIS_HASH = ('b868e0d95a3c3c0e0dadc67ee587aaf9'
'dc8acbf99e3b4b3110fad4eb74c1decc')
DESERIALIZER = DeserializerReddcoin
TX_COUNT = 5413508
TX_COUNT_HEIGHT = 1717382
TX_PER_BLOCK = 3
IRC_PREFIX = "E_"
IRC_CHANNEL = "#electrum-rdd"
RPC_PORT = 45443

10
lib/hash.py

@ -34,13 +34,11 @@ from lib.util import bytes_to_int, int_to_bytes
def sha256(x):
'''Simple wrapper of hashlib sha256.'''
assert isinstance(x, (bytes, bytearray, memoryview))
return hashlib.sha256(x).digest()
def ripemd160(x):
'''Simple wrapper of hashlib ripemd160.'''
assert isinstance(x, (bytes, bytearray, memoryview))
h = hashlib.new('ripemd160')
h.update(x)
return h.digest()
@ -63,13 +61,15 @@ def hash160(x):
return ripemd160(sha256(x))
def hash_to_str(x):
def hash_to_hex_str(x):
'''Convert a big-endian binary hash to displayed hex string.
Display form of a binary hash is reversed and converted to hex.
'''
return bytes(reversed(x)).hex()
# Temporary
hash_to_str = hash_to_hex_str
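A minimal round-trip sketch of the display-form convention, using both helpers from this module:

h = bytes(range(32))                     # a fake 32-byte binary hash
s = hash_to_hex_str(h)                   # byte-reversed, then hex
assert s.startswith('1f1e1d')
assert hex_str_to_hash(s) == h           # round-trips back to binary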
def hex_str_to_hash(x):
'''Convert a displayed hex string to a binary hash.'''
@ -98,7 +98,7 @@ class Base58(object):
def decode(txt):
"""Decodes txt into a big-endian bytearray."""
if not isinstance(txt, str):
raise Base58Error('a string is required')
raise TypeError('a string is required')
if not txt:
raise Base58Error('string cannot be empty')
@ -151,7 +151,5 @@ class Base58(object):
def encode_check(payload):
"""Encodes a payload bytearray (which includes the version byte(s))
into a Base58Check string."""
assert isinstance(payload, (bytes, bytearray, memoryview))
be_bytes = payload + double_sha256(payload)[:4]
return Base58.encode(be_bytes)

47
lib/peer.py

@ -28,7 +28,7 @@
import re
from ipaddress import ip_address
from lib.util import cachedproperty
from lib.util import cachedproperty, is_valid_hostname
class Peer(object):
@ -38,19 +38,18 @@ class Peer(object):
ATTRS = ('host', 'features',
# metadata
'source', 'ip_addr', 'good_ports',
'last_connect', 'last_try', 'try_count')
PORTS = ('ssl_port', 'tcp_port')
FEATURES = PORTS + ('pruning', 'server_version',
'protocol_min', 'protocol_max')
'last_good', 'last_try', 'try_count')
FEATURES = ('pruning', 'server_version', 'protocol_min', 'protocol_max')
# This should be set by the application
DEFAULT_PORTS = {}
def __init__(self, host, features, source='unknown', ip_addr=None,
good_ports=[], last_connect=0, last_try=0, try_count=0):
good_ports=[], last_good=0, last_try=0, try_count=0):
'''Create a peer given a host name (or IP address as a string),
a dictionary of features, and a record of the source.'''
assert isinstance(host, str)
assert isinstance(features, dict)
assert host in features.get('hosts', {})
self.host = host
self.features = features.copy()
# Canonicalize / clean-up
@ -60,7 +59,11 @@ class Peer(object):
self.source = source
self.ip_addr = ip_addr
self.good_ports = good_ports.copy()
self.last_connect = last_connect
# last_good represents the last connection that was
# successful *and* successfully verified, at which point
# try_count is set to 0. Failure to connect or failure to
# verify increment the try_count.
self.last_good = last_good
self.last_try = last_try
self.try_count = try_count
# Transient, non-persisted metadata
@ -95,22 +98,32 @@ class Peer(object):
return tuple(int(part) for part in vstr.split('.'))
def matches(self, peers):
'''Return peers whose host matches the given peer's host or IP
address. This results in our favouring host names over IP
addresses.
'''Return peers whose host matches our hostname or IP address.
Additionally include all peers whose IP address matches our
hostname if that is an IP address.
'''
candidates = (self.host.lower(), self.ip_addr)
return [peer for peer in peers if peer.host.lower() in candidates]
return [peer for peer in peers
if peer.host.lower() in candidates
or peer.ip_addr == self.host]
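# A hedged sketch of the rule above with hypothetical peers; the features
# dicts are reduced to the required 'hosts' key:
#   features = lambda h: {'hosts': {h: {}}}
#   a = Peer('node.example.com', features('node.example.com'),
#            ip_addr='1.2.3.4')
#   b = Peer('1.2.3.4', features('1.2.3.4'))
#   b in a.matches([b])   # True: b's host equals a's recorded IP address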
def __str__(self):
return self.host
def update_features(self, features):
'''Update features in-place.'''
tmp = Peer(self.host, features)
self.features = tmp.features
for feature in self.FEATURES:
setattr(self, feature, getattr(tmp, feature))
try:
tmp = Peer(self.host, features)
except Exception:
pass
else:
self.update_features_from_peer(tmp)
def update_features_from_peer(self, peer):
if peer != self:
self.features = peer.features
for feature in self.FEATURES:
setattr(self, feature, getattr(peer, feature))
def connection_port_pairs(self):
'''Return a list of (kind, port) pairs to try when making a
@ -146,7 +159,7 @@ class Peer(object):
if ip:
return ((ip.is_global or ip.is_private)
and not (ip.is_multicast or ip.is_unspecified))
return True
return is_valid_hostname(self.host)
@cachedproperty
def is_public(self):
@ -154,7 +167,7 @@ class Peer(object):
if ip:
return self.is_valid and not ip.is_private
else:
return self.host != 'localhost'
return self.is_valid and self.host != 'localhost'
@cachedproperty
def ip_address(self):

120
lib/socks.py

@ -137,44 +137,104 @@ class Socks(util.LoggedClass):
class SocksProxy(util.LoggedClass):
def __init__(self, host, port, loop=None):
'''Host can be an IPv4 address, IPv6 address, or a host name.'''
'''Host can be an IPv4 address, IPv6 address, or a host name.
Port can be None, in which case one is auto-detected.'''
super().__init__()
# Host and port of the proxy
self.host = host
self.port = port
self.try_ports = [port, 9050, 9150, 1080]
self.errors = 0
self.ip_addr = None
self.lost_event = asyncio.Event()
self.tried_event = asyncio.Event()
self.loop = loop or asyncio.get_event_loop()
async def create_connection(self, protocol_factory, host, port, ssl=None):
'''All arguments are as to asyncio's create_connection method.'''
if self.port is None:
proxy_ports = [9050, 9150, 1080]
else:
proxy_ports = [self.port]
for proxy_port in proxy_ports:
address = (self.host, proxy_port)
sock = socket.socket()
sock.setblocking(False)
try:
await self.loop.sock_connect(sock, address)
except OSError as e:
if proxy_port == proxy_ports[-1]:
raise
continue
self.set_lost()
async def auto_detect_loop(self):
'''Try to detect a proxy at regular intervals until one is found.
If one is found, do nothing until one is lost.'''
while True:
await self.lost_event.wait()
self.lost_event.clear()
tries = 0
while True:
tries += 1
log_failure = tries % 10 == 1
await self.detect_proxy(log_failure=log_failure)
if self.is_up():
break
await asyncio.sleep(600)
def is_up(self):
'''Returns True if we have a good proxy.'''
return self.port is not None
def set_lost(self):
'''Called when the proxy appears lost/down.'''
self.port = None
self.lost_event.set()
async def connect_via_proxy(self, host, port, proxy_address=None):
'''Connect to a (host, port) pair via the proxy. Returns the
connected socket on success.'''
proxy_address = proxy_address or (self.host, self.port)
sock = socket.socket()
sock.setblocking(False)
try:
await self.loop.sock_connect(sock, proxy_address)
socks = Socks(self.loop, sock, host, port)
await socks.handshake()
return sock
except Exception:
sock.close()
raise
async def detect_proxy(self, host='www.google.com', port=80,
log_failure=True):
'''Attempt to detect a proxy by establishing a connection through it
to the given target host / port pair.
'''
if self.is_up():
return
sock = None
for proxy_port in self.try_ports:
if proxy_port is None:
continue
paddress = (self.host, proxy_port)
try:
await socks.handshake()
if self.port is None:
self.ip_addr = sock.getpeername()[0]
self.port = proxy_port
self.logger.info('detected proxy at {} ({})'
.format(util.address_string(address),
self.ip_addr))
sock = await self.connect_via_proxy(host, port, paddress)
break
except Exception as e:
sock.close()
raise
if log_failure:
self.logger.info('failed to detect proxy at {}: {}'
.format(util.address_string(paddress), e))
self.tried_event.set()
# Failed all ports?
if sock is None:
return
peername = sock.getpeername()
sock.close()
self.ip_addr = peername[0]
self.port = proxy_port
self.errors = 0
self.logger.info('detected proxy at {} ({})'
.format(util.address_string(paddress), self.ip_addr))
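A hedged usage sketch of this detection flow (assumes a local Tor or SOCKS daemon may be listening, so the driver line is left commented out):

import asyncio

async def probe():
    proxy = SocksProxy('localhost', None)    # port None -> auto-detect
    await proxy.detect_proxy()               # probes 9050, 9150, then 1080
    print('proxy up:', proxy.is_up(), 'port:', proxy.port)

# asyncio.get_event_loop().run_until_complete(probe())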
async def create_connection(self, protocol_factory, host, port, ssl=None):
'''All arguments are as to asyncio's create_connection method.'''
try:
sock = await self.connect_via_proxy(host, port)
self.errors = 0
except Exception:
self.errors += 1
# If we have 3 consecutive errors, consider the proxy undetected
if self.errors == 3:
self.set_lost()
raise
hostname = host if ssl else None
return await self.loop.create_connection(

136
lib/tx.py

@ -1,4 +1,5 @@
# Copyright (c) 2016-2017, Neil Booth
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
#
@ -55,13 +56,6 @@ class TxInput(namedtuple("TxInput", "prev_hash prev_idx script sequence")):
return (self.prev_hash == TxInput.ZERO and
self.prev_idx == TxInput.MINUS_1)
@cachedproperty
def script_sig_info(self):
# No meaning for coinbases
if self.is_coinbase:
return None
return Script.parse_script_sig(self.script)
def __str__(self):
script = self.script.hex()
prev_hash = hash_to_str(self.prev_hash)
@ -70,12 +64,7 @@ class TxInput(namedtuple("TxInput", "prev_hash prev_idx script sequence")):
class TxOutput(namedtuple("TxOutput", "value pk_script")):
'''Class representing a transaction output.'''
@cachedproperty
def pay_to(self):
return Script.parse_pk_script(self.pk_script)
pass
class Deserializer(object):
'''Deserializes blocks into transactions.
@ -105,10 +94,10 @@ class Deserializer(object):
self._read_le_uint32() # locktime
), double_sha256(self.binary[start:self.cursor])
def read_block(self):
def read_tx_block(self):
'''Returns a list of (deserialized_tx, tx_hash) pairs.'''
read_tx = self.read_tx
txs = [read_tx() for n in range(self._read_varint())]
txs = [read_tx() for _ in range(self._read_varint())]
# Some coins have excess data beyond the end of the transactions
return txs
@ -134,6 +123,11 @@ class Deserializer(object):
self._read_varbytes(), # pk_script
)
def _read_byte(self):
cursor = self.cursor
self.cursor += 1
return self.binary[cursor]
def _read_nbytes(self, n):
cursor = self.cursor
self.cursor = end = cursor + n
@ -193,11 +187,6 @@ class DeserializerSegWit(Deserializer):
# https://bitcoincore.org/en/segwit_wallet_dev/#transaction-serialization
def _read_byte(self):
cursor = self.cursor
self.cursor += 1
return self.binary[cursor]
def _read_witness(self, fields):
read_witness_field = self._read_witness_field
return [read_witness_field() for i in range(fields)]
@ -237,3 +226,110 @@ class DeserializerSegWit(Deserializer):
return TxSegWit(version, marker, flag, inputs,
outputs, witness, locktime), double_sha256(orig_ser)
class DeserializerAuxPow(Deserializer):
VERSION_AUXPOW = (1 << 8)
def read_header(self, height, static_header_size):
'''Return the AuxPow block header bytes'''
start = self.cursor
version = self._read_le_uint32()
if version & self.VERSION_AUXPOW:
# We are going to calculate the block size then read it as bytes
self.cursor = start
self.cursor += static_header_size # Block normal header
self.read_tx() # AuxPow transaction
self.cursor += 32 # Parent block hash
merkle_size = self._read_varint()
self.cursor += 32 * merkle_size # Merkle branch
self.cursor += 4 # Index
merkle_size = self._read_varint()
self.cursor += 32 * merkle_size # Chain merkle branch
self.cursor += 4 # Chain index
self.cursor += 80 # Parent block header
header_end = self.cursor
else:
header_end = static_header_size
self.cursor = start
return self._read_nbytes(header_end)
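A hedged sketch of how a merged-mined coin might wire this deserializer in; the class is hypothetical and mirrors the Zcash block_header hook above:

class SomeAuxPowCoin(Coin):                  # hypothetical example
    DESERIALIZER = DeserializerAuxPow
    STATIC_BLOCK_HEADERS = False             # AuxPow headers vary in length

    @classmethod
    def block_header(cls, block, height):
        '''Return the header bytes, including the parent-block proof.'''
        return cls.DESERIALIZER(block).read_header(height,
                                                   cls.BASIC_HEADER_SIZE)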
class TxJoinSplit(namedtuple("Tx", "version inputs outputs locktime")):
'''Class representing a JoinSplit transaction.'''
@cachedproperty
def is_coinbase(self):
return self.inputs[0].is_coinbase if len(self.inputs) > 0 else False
class DeserializerZcash(Deserializer):
def read_header(self, height, static_header_size):
'''Return the block header bytes'''
start = self.cursor
# We are going to calculate the block size then read it as bytes
self.cursor += static_header_size
solution_size = self._read_varint()
self.cursor += solution_size
header_end = self.cursor
self.cursor = start
return self._read_nbytes(header_end)
def read_tx(self):
start = self.cursor
base_tx = TxJoinSplit(
self._read_le_int32(), # version
self._read_inputs(), # inputs
self._read_outputs(), # outputs
self._read_le_uint32() # locktime
)
if base_tx.version >= 2:
joinsplit_size = self._read_varint()
if joinsplit_size > 0:
self.cursor += joinsplit_size * 1802 # JSDescription
self.cursor += 32 # joinSplitPubKey
self.cursor += 64 # joinSplitSig
return base_tx, double_sha256(self.binary[start:self.cursor])
class TxTime(namedtuple("Tx", "version time inputs outputs locktime")):
'''Class representing transaction that has a time field.'''
@cachedproperty
def is_coinbase(self):
return self.inputs[0].is_coinbase
class DeserializerTxTime(Deserializer):
def read_tx(self):
start = self.cursor
return TxTime(
self._read_le_int32(), # version
self._read_le_uint32(), # time
self._read_inputs(), # inputs
self._read_outputs(), # outputs
self._read_le_uint32(), # locktime
), double_sha256(self.binary[start:self.cursor])
class DeserializerReddcoin(Deserializer):
def read_tx(self):
start = self.cursor
version = self._read_le_int32()
inputs = self._read_inputs()
outputs = self._read_outputs()
locktime = self._read_le_uint32()
if version > 1:
time = self._read_le_uint32()
else:
time = 0
return TxTime(
version,
time,
inputs,
outputs,
locktime,
), double_sha256(self.binary[start:self.cursor])

28
lib/util.py

@ -31,8 +31,10 @@ import array
import inspect
from ipaddress import ip_address
import logging
import re
import sys
from collections import Container, Mapping
from struct import pack
class LoggedClass(object):
@ -155,6 +157,20 @@ def int_to_bytes(value):
return value.to_bytes((value.bit_length() + 7) // 8, 'big')
def int_to_varint(value):
'''Converts an integer to a Bitcoin-like varint bytes'''
if value < 0:
raise Exception("attempt to write size < 0")
elif value < 253:
return pack('<B', value)
elif value < 2**16:
return b'\xfd' + pack('<H', value)
elif value < 2**32:
return b'\xfe' + pack('<I', value)
elif value < 2**64:
return b'\xff' + pack('<Q', value)
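The encoding boundaries can be checked directly; each assert below holds for int_to_varint as written:

assert int_to_varint(0) == b'\x00'
assert int_to_varint(252) == b'\xfc'
assert int_to_varint(253) == b'\xfd\xfd\x00'
assert int_to_varint(2**16) == b'\xfe\x00\x00\x01\x00'
assert int_to_varint(2**32) == b'\xff\x00\x00\x00\x00\x01\x00\x00\x00'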
def increment_byte_string(bs):
'''Return the lexicographically next byte string of the same length.
@ -241,3 +257,15 @@ def address_string(address):
if host.version == 6:
fmt = '[{}]:{:d}'
return fmt.format(host, port)
# See http://stackoverflow.com/questions/2532053/validate-a-hostname-string
# Note underscores are valid in domain names, but strictly invalid in host
# names. We ignore that distinction.
SEGMENT_REGEX = re.compile(r"(?!-)[A-Z_\d-]{1,63}(?<!-)$", re.IGNORECASE)
def is_valid_hostname(hostname):
if len(hostname) > 255:
return False
# strip exactly one dot from the right, if present
if hostname and hostname[-1] == ".":
hostname = hostname[:-1]
return all(SEGMENT_REGEX.match(x) for x in hostname.split("."))
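Illustrative checks of the validator above (underscores allowed, per the note):

assert is_valid_hostname('example.com')
assert is_valid_hostname('example.com.')       # one trailing dot is stripped
assert is_valid_hostname('foo_bar.example')    # underscores tolerated
assert not is_valid_hostname('-bad-.example')  # no leading/trailing hyphens
assert not is_valid_hostname('a' * 256)        # longer than 255 characters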

3
samples/daemontools/run

@ -1,3 +0,0 @@
j#!/bin/sh
echo "Launching ElectrumX server..."
exec 2>&1 envdir ./env /bin/sh -c 'envuidgid $USERNAME python3 $ELECTRUMX'

74
server/block_processor.py

@ -1,4 +1,5 @@
# Copyright (c) 2016-2017, Neil Booth
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
#
@ -28,7 +29,6 @@ class Prefetcher(LoggedClass):
def __init__(self, bp):
super().__init__()
self.bp = bp
self.caught_up = False
# Access to fetched_height should be protected by the semaphore
self.fetched_height = None
self.semaphore = asyncio.Semaphore()
@ -83,7 +83,14 @@ class Prefetcher(LoggedClass):
Repeats until the queue is full or caught up.
'''
daemon = self.bp.daemon
daemon_height = await daemon.height(self.bp.caught_up_event.is_set())
# If caught up, refresh the mempool before the current height
caught_up = self.bp.caught_up_event.is_set()
if caught_up:
mempool = await daemon.mempool_hashes()
else:
mempool = []
daemon_height = await daemon.height()
with await self.semaphore:
while self.cache_size < self.min_cache_size:
# Try and catch up all blocks but limit to room in cache.
@ -93,14 +100,15 @@ class Prefetcher(LoggedClass):
count = min(daemon_height - self.fetched_height, cache_room)
count = min(500, max(count, 0))
if not count:
if not self.caught_up:
self.caught_up = True
if caught_up:
self.bp.set_mempool_hashes(mempool)
else:
self.bp.on_prefetcher_first_caught_up()
return False
first = self.fetched_height + 1
hex_hashes = await daemon.block_hex_hashes(first, count)
if self.caught_up:
if caught_up:
self.logger.info('new block height {:,d} hash {}'
.format(first + count-1, hex_hashes[-1]))
blocks = await daemon.raw_blocks(hex_hashes)
@ -120,7 +128,7 @@ class Prefetcher(LoggedClass):
else:
self.ave_size = (size + (10 - count) * self.ave_size) // 10
self.bp.on_prefetched_blocks(blocks, first)
self.bp.on_prefetched_blocks(blocks, first, mempool)
self.cache_size += size
self.fetched_height += count
@ -141,6 +149,11 @@ class BlockProcessor(server.db.DB):
def __init__(self, env, controller, daemon):
super().__init__(env)
# An incomplete compaction needs to be cancelled otherwise
# restarting it will corrupt the history
self.cancel_history_compaction()
self.daemon = daemon
self.controller = controller
@ -182,9 +195,10 @@ class BlockProcessor(server.db.DB):
'''Add the task to our task queue.'''
self.task_queue.put_nowait(task)
def on_prefetched_blocks(self, blocks, first):
def on_prefetched_blocks(self, blocks, first, mempool):
'''Called by the prefetcher when it has prefetched some blocks.'''
self.add_task(partial(self.check_and_advance_blocks, blocks, first))
self.add_task(partial(self.check_and_advance_blocks, blocks, first,
mempool))
def on_prefetcher_first_caught_up(self):
'''Called by the prefetcher when it first catches up.'''
@ -219,7 +233,10 @@ class BlockProcessor(server.db.DB):
self.open_dbs()
self.caught_up_event.set()
async def check_and_advance_blocks(self, blocks, first):
def set_mempool_hashes(self, mempool):
self.controller.mempool.set_hashes(mempool)
async def check_and_advance_blocks(self, blocks, first, mempool):
'''Process the list of blocks passed. Detects and handles reorgs.'''
self.prefetcher.processing_blocks(blocks)
if first != self.height + 1:
@ -231,20 +248,21 @@ class BlockProcessor(server.db.DB):
.format(len(blocks), first, self.height + 1))
return
headers = [self.coin.block_header(block, first + n)
for n, block in enumerate(blocks)]
blocks = [self.coin.block_full(block, first + n)
for n, block in enumerate(blocks)]
headers = [b.header for b in blocks]
hprevs = [self.coin.header_prevhash(h) for h in headers]
chain = [self.tip] + [self.coin.header_hash(h) for h in headers[:-1]]
if hprevs == chain:
start = time.time()
await self.controller.run_in_executor(self.advance_blocks,
blocks, headers)
await self.controller.run_in_executor(self.advance_blocks, blocks)
if not self.first_sync:
s = '' if len(blocks) == 1 else 's'
self.logger.info('processed {:,d} block{} in {:.1f}s'
.format(len(blocks), s,
time.time() - start))
self.set_mempool_hashes(mempool)
elif hprevs[0] != chain[0]:
await self.reorg_chain()
else:
@ -291,11 +309,13 @@ class BlockProcessor(server.db.DB):
The hashes are returned in order of increasing height.'''
def match_pos(hashes1, hashes2):
def diff_pos(hashes1, hashes2):
'''Returns the index of the first difference in the hash lists.
If both lists match returns their length.'''
for n, (hash1, hash2) in enumerate(zip(hashes1, hashes2)):
if hash1 == hash2:
if hash1 != hash2:
return n
return -1
return len(hashes)
if count is None:
# A real reorg
@ -305,9 +325,9 @@ class BlockProcessor(server.db.DB):
hashes = self.fs_block_hashes(start, count)
hex_hashes = [hash_to_str(hash) for hash in hashes]
d_hex_hashes = await self.daemon.block_hex_hashes(start, count)
n = match_pos(hex_hashes, d_hex_hashes)
if n >= 0:
start += n + 1
n = diff_pos(hex_hashes, d_hex_hashes)
if n > 0:
start += n
break
count = min(count * 2, start)
start -= count
@ -329,7 +349,7 @@ class BlockProcessor(server.db.DB):
self.wall_time += now - self.last_flush
self.last_flush = now
self.last_flush_tx_count = self.tx_count
self.write_state(batch)
self.write_utxo_state(batch)
def assert_flushed(self):
'''Asserts state is fully flushed.'''
@ -477,21 +497,21 @@ class BlockProcessor(server.db.DB):
if utxo_MB + hist_MB >= self.cache_MB or hist_MB >= self.cache_MB // 5:
self.flush(utxo_MB >= self.cache_MB * 4 // 5)
def advance_blocks(self, blocks, headers):
def advance_blocks(self, blocks):
'''Synchronously advance the blocks.
It is already verified they correctly connect onto our tip.
'''
block_txs = self.coin.block_txs
min_height = self.min_undo_height(self.daemon.cached_height())
height = self.height
for block in blocks:
height += 1
undo_info = self.advance_txs(block_txs(block, height))
undo_info = self.advance_txs(block.transactions)
if height >= min_height:
self.undo_infos.append((undo_info, height))
headers = [block.header for block in blocks]
self.height = height
self.headers.extend(headers)
self.tip = self.coin.header_hash(headers[-1])
@ -566,14 +586,14 @@ class BlockProcessor(server.db.DB):
coin = self.coin
for block in blocks:
# Check and update self.tip
header = coin.block_header(block, self.height)
header_hash = coin.header_hash(header)
block_full = coin.block_full(block, self.height)
header_hash = coin.header_hash(block_full.header)
if header_hash != self.tip:
raise ChainError('backup block {} not tip {} at height {:,d}'
.format(hash_to_str(header_hash),
hash_to_str(self.tip), self.height))
self.tip = coin.header_prevhash(header)
self.backup_txs(coin.block_txs(block, self.height))
self.tip = coin.header_prevhash(block_full.header)
self.backup_txs(block_full.transactions)
self.height -= 1
self.tx_counts.pop()

31
server/controller.py

@ -11,7 +11,6 @@ import os
import ssl
import time
import traceback
import warnings
from bisect import bisect_left
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
@ -19,15 +18,14 @@ from functools import partial
import pylru
from lib.jsonrpc import JSONRPC, JSONSessionBase, RPCError
from lib.jsonrpc import JSONSessionBase, RPCError
from lib.hash import double_sha256, hash_to_str, hex_str_to_hash
from lib.peer import Peer
import lib.util as util
from server.block_processor import BlockProcessor
from server.daemon import Daemon, DaemonError
from server.daemon import DaemonError
from server.mempool import MemPool
from server.peers import PeerManager
from server.session import LocalRPC, ElectrumX
from server.session import LocalRPC
class Controller(util.LoggedClass):
@ -49,8 +47,8 @@ class Controller(util.LoggedClass):
self.loop.set_default_executor(self.executor)
self.start_time = time.time()
self.coin = env.coin
self.daemon = Daemon(env.coin.daemon_urls(env.daemon_url))
self.bp = BlockProcessor(env, self, self.daemon)
self.daemon = self.coin.DAEMON(env.coin.daemon_urls(env.daemon_url))
self.bp = self.coin.BLOCK_PROCESSOR(env, self, self.daemon)
self.mempool = MemPool(self.bp, self)
self.peer_mgr = PeerManager(env, self)
self.env = env
@ -250,7 +248,7 @@ class Controller(util.LoggedClass):
server.close()
async def start_server(self, kind, *args, **kw_args):
protocol_class = LocalRPC if kind == 'RPC' else ElectrumX
protocol_class = LocalRPC if kind == 'RPC' else self.coin.SESSIONCLS
protocol_factory = partial(protocol_class, self, kind)
server = self.loop.create_server(protocol_factory, *args, **kw_args)
@ -311,7 +309,7 @@ class Controller(util.LoggedClass):
self.header_cache.clear()
# Make a copy; self.sessions can change whilst await-ing
sessions = [s for s in self.sessions if isinstance(s, ElectrumX)]
sessions = [s for s in self.sessions if isinstance(s, self.coin.SESSIONCLS)]
for session in sessions:
await session.notify(self.bp.db_height, touched)
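With these hooks a coin selects its implementations purely through class attributes; a hedged sketch using a hypothetical coin:

class SomeCoin(Coin):                    # hypothetical example
    DAEMON = LegacyRPCDaemon             # daemon lacking hex 'getblock'
    SESSIONCLS = ElectrumX               # default session protocol
    BLOCK_PROCESSOR = BlockProcessor     # default block processor
    DESERIALIZER = DeserializerTxTime    # transactions carry a time field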
@ -499,19 +497,21 @@ class Controller(util.LoggedClass):
fmt = ('{:<30} {:<6} {:>5} {:>5} {:<17} {:>3} '
'{:>3} {:>8} {:>11} {:>11} {:>5} {:>20} {:<15}')
yield fmt.format('Host', 'Status', 'TCP', 'SSL', 'Server', 'Min',
'Max', 'Pruning', 'Last Conn', 'Last Try',
'Max', 'Pruning', 'Last Good', 'Last Try',
'Tries', 'Source', 'IP Address')
for item in data:
features = item['features']
yield fmt.format(item['host'][:30],
hostname = item['host']
host = features['hosts'][hostname]
yield fmt.format(hostname[:30],
item['status'],
features['tcp_port'] or '',
features['ssl_port'] or '',
host.get('tcp_port') or '',
host.get('ssl_port') or '',
features['server_version'] or 'unknown',
features['protocol_min'],
features['protocol_max'],
features['pruning'] or '',
time_fmt(item['last_connect']),
time_fmt(item['last_good']),
time_fmt(item['last_try']),
item['try_count'],
item['source'][:20],
@ -861,8 +861,7 @@ class Controller(util.LoggedClass):
if not raw_tx:
return None
raw_tx = bytes.fromhex(raw_tx)
deserializer = self.coin.deserializer()
tx, tx_hash = deserializer(raw_tx).read_tx()
tx, tx_hash = self.coin.DESERIALIZER(raw_tx).read_tx()
if index >= len(tx.outputs):
return None
return self.coin.address_from_script(tx.outputs[index].pk_script)

114
server/daemon.py

@ -12,10 +12,14 @@ import asyncio
import json
import time
import traceback
from calendar import timegm
from struct import pack
from time import strptime
import aiohttp
import lib.util as util
from lib.hash import hex_str_to_hash
class DaemonError(Exception):
@ -34,13 +38,24 @@ class Daemon(util.LoggedClass):
super().__init__()
self.set_urls(urls)
self._height = None
self._mempool_hashes = set()
self.mempool_refresh_event = asyncio.Event()
# Limit concurrent RPC calls to this number.
# See DEFAULT_HTTP_WORKQUEUE in bitcoind, which is typically 16
self.workqueue_semaphore = asyncio.Semaphore(value=10)
self.down = False
self.last_error_time = 0
self.req_id = 0
# aiohttp 1.x and 2.x expose different exception classes; use
# asyncio.TimeoutError (caught separately anyway) as a placeholder,
# so its assignments below are essentially ignored
if aiohttp.__version__.startswith('1.'):
self.ClientHttpProcessingError = aiohttp.ClientHttpProcessingError
self.ClientPayloadError = asyncio.TimeoutError
else:
self.ClientHttpProcessingError = asyncio.TimeoutError
self.ClientPayloadError = aiohttp.ClientPayloadError
def next_req_id(self):
'''Returns the next request ID.'''
self.req_id += 1
return self.req_id
def set_urls(self, urls):
'''Set the URLS to the given list, and switch to the first one.'''
@ -68,9 +83,13 @@ class Daemon(util.LoggedClass):
return True
return False
def client_session(self):
'''An aiohttp client session.'''
return aiohttp.ClientSession()
async def _send_data(self, data):
async with self.workqueue_semaphore:
async with aiohttp.ClientSession() as session:
async with self.client_session() as session:
async with session.post(self.url(), data=data) as resp:
# If bitcoind can't find a tx, for some reason
# it returns 500 but fills out the JSON.
@ -114,10 +133,12 @@ class Daemon(util.LoggedClass):
.format(result[0], result[1]))
except asyncio.TimeoutError:
log_error('timeout error.')
except aiohttp.ClientHttpProcessingError:
log_error('HTTP error.')
except aiohttp.ServerDisconnectedError:
log_error('disconnected.')
except self.ClientHttpProcessingError:
log_error('HTTP error.')
except self.ClientPayloadError:
log_error('payload encoding error.')
except aiohttp.ClientConnectionError:
log_error('connection problem - is your daemon running?')
except self.DaemonWarmingUpError:
@ -145,7 +166,7 @@ class Daemon(util.LoggedClass):
raise self.DaemonWarmingUpError
raise DaemonError(err)
payload = {'method': method}
payload = {'method': method, 'id': self.next_req_id()}
if params:
payload['params'] = params
return await self._send(payload, processor)
@ -164,7 +185,8 @@ class Daemon(util.LoggedClass):
return [item['result'] for item in result]
raise DaemonError(errs)
payload = [{'method': method, 'params': p} for p in params_iterable]
payload = [{'method': method, 'params': p, 'id': self.next_req_id()}
for p in params_iterable]
if payload:
return await self._send(payload, processor)
return []
@ -186,7 +208,7 @@ class Daemon(util.LoggedClass):
return [bytes.fromhex(block) for block in blocks]
async def mempool_hashes(self):
'''Update our record of the daemon's mempool hashes.'''
'''Return a list of the daemon's mempool hashes.'''
return await self._send_single('getrawmempool')
async def estimatefee(self, params):
@ -221,20 +243,80 @@ class Daemon(util.LoggedClass):
'''Broadcast a transaction to the network.'''
return await self._send_single('sendrawtransaction', params)
async def height(self, mempool=False):
async def height(self):
'''Query the daemon for its current height.'''
self._height = await self._send_single('getblockcount')
if mempool:
self._mempool_hashes = set(await self.mempool_hashes())
self.mempool_refresh_event.set()
return self._height
def cached_mempool_hashes(self):
'''Return the cached mempool hashes.'''
return self._mempool_hashes
def cached_height(self):
'''Return the cached daemon height.
If the daemon has not been queried yet this returns None.'''
return self._height
class DashDaemon(Daemon):
async def masternode_broadcast(self, params):
'''Broadcast a masternode announcement to the network.'''
return await self._send_single('masternodebroadcast', params)
async def masternode_list(self, params):
'''Return the masternode list.'''
return await self._send_single('masternodelist', params)
class LegacyRPCDaemon(Daemon):
'''Handles connections to a daemon at the given URL.
This class is useful for daemons that don't have the new 'getblock'
RPC call that returns the block in hex; the workaround is to manually
recreate the block bytes. The recreated block bytes may not exactly
match those in the underlying blockchain, but they are good enough for
our indexing purposes.'''
async def raw_blocks(self, hex_hashes):
'''Return the raw binary blocks with the given hex hashes.'''
params_iterable = ((h, False) for h in hex_hashes)
block_info = await self._send_vector('getblock', params_iterable)
blocks = []
for i in block_info:
raw_block = await self.make_raw_block(i)
blocks.append(raw_block)
return blocks
async def make_raw_header(self, b):
pbh = b.get('previousblockhash')
if pbh is None:
pbh = '0' * 64
header = pack('<L', b.get('version')) \
+ hex_str_to_hash(pbh) \
+ hex_str_to_hash(b.get('merkleroot')) \
+ pack('<L', self.timestamp_safe(b['time'])) \
+ pack('<L', int(b.get('bits'), 16)) \
+ pack('<L', int(b.get('nonce')))
return header
async def make_raw_block(self, b):
'''Construct a raw block'''
header = await self.make_raw_header(b)
transactions = []
if b.get('height') > 0:
transactions = await self.getrawtransactions(b.get('tx'), False)
raw_block = header
num_txs = len(transactions)
if num_txs > 0:
raw_block += util.int_to_varint(num_txs)
raw_block += b''.join(transactions)
else:
raw_block += b'\x00'
return raw_block
def timestamp_safe(self, t):
return t if isinstance(t, int) else timegm(strptime(t, "%Y-%m-%d %H:%M:%S %Z"))
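A self-contained sketch of the 80-byte legacy header layout rebuilt above, with fabricated field values; hex_str_to_hash is re-derived locally so the snippet runs standalone:

from calendar import timegm
from struct import pack
from time import strptime

def hex_str_to_hash(x):              # as in lib/hash: display hex -> binary
    return bytes(reversed(bytes.fromhex(x)))

b = {'version': 2, 'previousblockhash': '00' * 32, 'merkleroot': '11' * 32,
     'time': '1970-01-01 00:00:01 UTC', 'bits': '1d00ffff', 'nonce': 42}
t = b['time']
t = t if isinstance(t, int) else timegm(strptime(t, '%Y-%m-%d %H:%M:%S %Z'))
header = (pack('<L', b['version'])
          + hex_str_to_hash(b['previousblockhash'])
          + hex_str_to_hash(b['merkleroot'])
          + pack('<L', t)
          + pack('<L', int(b['bits'], 16))
          + pack('<L', int(b['nonce'])))
assert len(header) == 80             # version+prev+merkle+time+bits+nonce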

459
server/db.py

@ -1,4 +1,5 @@
# Copyright (c) 2016, Neil Booth
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
#
@ -44,6 +45,14 @@ class DB(util.LoggedClass):
self.env = env
self.coin = env.coin
# Setup block header size handlers
if self.coin.STATIC_BLOCK_HEADERS:
self.header_offset = self.coin.static_header_offset
self.header_len = self.coin.static_header_len
else:
self.header_offset = self.dynamic_header_offset
self.header_len = self.dynamic_header_len
self.logger.info('switching current directory to {}'
.format(env.db_dir))
os.chdir(env.db_dir)
@ -51,6 +60,9 @@ class DB(util.LoggedClass):
self.db_class = db_class(self.env.db_engine)
self.logger.info('using {} for DB backend'.format(self.env.db_engine))
# For history compaction
self.max_hist_row_entries = 12500
self.utxo_db = None
self.open_dbs()
self.clean_db()
@ -61,6 +73,12 @@ class DB(util.LoggedClass):
self.headers_file = util.LogicalFile('meta/headers', 2, 16000000)
self.tx_counts_file = util.LogicalFile('meta/txcounts', 2, 2000000)
self.hashes_file = util.LogicalFile('meta/hashes', 4, 16000000)
if not self.coin.STATIC_BLOCK_HEADERS:
self.headers_offsets_file = util.LogicalFile(
'meta/headers_offsets', 2, 16000000)
# Write the offset of the genesis block
if self.headers_offsets_file.read(0, 8) != b'\x00' * 8:
self.headers_offsets_file.write(0, b'\x00' * 8)
# tx_counts[N] has the cumulative number of txs at the end of
# height N. So tx_counts[0] is 1 - the genesis coinbase
@ -119,58 +137,11 @@ class DB(util.LoggedClass):
self.logger.info('height: {:,d}'.format(self.db_height))
self.logger.info('tip: {}'.format(hash_to_str(self.db_tip)))
self.logger.info('tx count: {:,d}'.format(self.db_tx_count))
self.logger.info('flush count: {:,d}'.format(self.flush_count))
if self.first_sync:
self.logger.info('sync time so far: {}'
.format(util.formatted_time(self.wall_time)))
def read_utxo_state(self):
state = self.utxo_db.get(b'state')
if not state:
self.db_height = -1
self.db_tx_count = 0
self.db_tip = b'\0' * 32
self.db_version = max(self.DB_VERSIONS)
self.utxo_flush_count = 0
self.wall_time = 0
self.first_sync = True
else:
state = ast.literal_eval(state.decode())
if not isinstance(state, dict):
raise self.DBError('failed reading state from DB')
self.db_version = state['db_version']
if self.db_version not in self.DB_VERSIONS:
raise self.DBError('your DB version is {} but this software '
'only handles versions {}'
.format(self.db_version, self.DB_VERSIONS))
# backwards compat
genesis_hash = state['genesis']
if isinstance(genesis_hash, bytes):
genesis_hash = genesis_hash.decode()
if genesis_hash != self.coin.GENESIS_HASH:
raise self.DBError('DB genesis hash {} does not match coin {}'
.format(state['genesis_hash'],
self.coin.GENESIS_HASH))
self.db_height = state['height']
self.db_tx_count = state['tx_count']
self.db_tip = state['tip']
self.utxo_flush_count = state['utxo_flush_count']
self.wall_time = state['wall_time']
self.first_sync = state['first_sync']
def write_state(self, batch):
'''Write (UTXO) state to the batch.'''
state = {
'genesis': self.coin.GENESIS_HASH,
'height': self.db_height,
'tx_count': self.db_tx_count,
'tip': self.db_tip,
'utxo_flush_count': self.utxo_flush_count,
'wall_time': self.wall_time,
'first_sync': self.first_sync,
'db_version': self.db_version,
}
batch.put(b'state', repr(state).encode())
def clean_db(self):
'''Clean out stale DB items.
@ -179,11 +150,35 @@ class DB(util.LoggedClass):
undo information.
'''
if self.flush_count < self.utxo_flush_count:
raise self.DBError('DB corrupt: flush_count < utxo_flush_count')
# Might happen at end of compaction as both DBs cannot be
# updated atomically
self.utxo_flush_count = self.flush_count
if self.flush_count > self.utxo_flush_count:
self.clear_excess_history(self.utxo_flush_count)
self.clear_excess_undo_info()
def fs_update_header_offsets(self, offset_start, height_start, headers):
if self.coin.STATIC_BLOCK_HEADERS:
return
offset = offset_start
offsets = []
for h in headers:
offset += len(h)
offsets.append(pack("<Q", offset))
# For each header we get the offset of the next header, hence we
# start writing from the next height
pos = (height_start + 1) * 8
self.headers_offsets_file.write(pos, b''.join(offsets))
def dynamic_header_offset(self, height):
assert not self.coin.STATIC_BLOCK_HEADERS
offset, = unpack('<Q', self.headers_offsets_file.read(height * 8, 8))
return offset
def dynamic_header_len(self, height):
return (self.dynamic_header_offset(height + 1)
- self.dynamic_header_offset(height))
def fs_update(self, fs_height, headers, block_tx_hashes):
'''Write headers, the tx_count array and block tx hashes to disk.
@ -191,24 +186,26 @@ class DB(util.LoggedClass):
updated. These arrays are all append only, so in a crash we
just pick up again from the DB height.
'''
blocks_done = len(self.headers)
blocks_done = len(headers)
height_start = fs_height + 1
new_height = fs_height + blocks_done
prior_tx_count = (self.tx_counts[fs_height] if fs_height >= 0 else 0)
cur_tx_count = self.tx_counts[-1] if self.tx_counts else 0
txs_done = cur_tx_count - prior_tx_count
assert len(self.tx_hashes) == blocks_done
assert len(block_tx_hashes) == blocks_done
assert len(self.tx_counts) == new_height + 1
hashes = b''.join(block_tx_hashes)
assert len(hashes) % 32 == 0
assert len(hashes) // 32 == txs_done
# Write the headers, tx counts, and tx hashes
offset = self.coin.header_offset(fs_height + 1)
offset = self.header_offset(height_start)
self.headers_file.write(offset, b''.join(headers))
offset = (fs_height + 1) * self.tx_counts.itemsize
self.fs_update_header_offsets(offset, height_start, headers)
offset = height_start * self.tx_counts.itemsize
self.tx_counts_file.write(offset,
self.tx_counts[fs_height + 1:].tobytes())
self.tx_counts[height_start:].tobytes())
offset = prior_tx_count * 32
self.hashes_file.write(offset, hashes)
@ -220,8 +217,8 @@ class DB(util.LoggedClass):
raise self.DBError('{:,d} headers starting at {:,d} not on disk'
.format(count, start))
if disk_count:
offset = self.coin.header_offset(start)
size = self.coin.header_offset(start + disk_count) - offset
offset = self.header_offset(start)
size = self.header_offset(start + disk_count) - offset
return self.headers_file.read(offset, size)
return b''
@ -241,7 +238,7 @@ class DB(util.LoggedClass):
offset = 0
headers = []
for n in range(count):
hlen = self.coin.header_len(height + n)
hlen = self.header_len(height + n)
headers.append(headers_concat[offset:offset + hlen])
offset += hlen
@ -254,6 +251,93 @@ class DB(util.LoggedClass):
assert isinstance(limit, int) and limit >= 0
return limit
# -- Undo information
def min_undo_height(self, max_height):
'''Returns a height from which we should store undo info.'''
return max_height - self.env.reorg_limit + 1
def undo_key(self, height):
'''DB key for undo information at the given height.'''
return b'U' + pack('>I', height)
def read_undo_info(self, height):
'''Read undo information from a file for the current height.'''
return self.utxo_db.get(self.undo_key(height))
def flush_undo_infos(self, batch_put, undo_infos):
'''undo_infos is a list of (undo_info, height) pairs.'''
for undo_info, height in undo_infos:
batch_put(self.undo_key(height), b''.join(undo_info))
def clear_excess_undo_info(self):
'''Clear excess undo info. Only most recent N are kept.'''
prefix = b'U'
min_height = self.min_undo_height(self.db_height)
keys = []
for key, hist in self.utxo_db.iterator(prefix=prefix):
height, = unpack('>I', key[-4:])
if height >= min_height:
break
keys.append(key)
if keys:
with self.utxo_db.write_batch() as batch:
for key in keys:
batch.delete(key)
self.logger.info('deleted {:,d} stale undo entries'
.format(len(keys)))
# -- UTXO database
def read_utxo_state(self):
state = self.utxo_db.get(b'state')
if not state:
self.db_height = -1
self.db_tx_count = 0
self.db_tip = b'\0' * 32
self.db_version = max(self.DB_VERSIONS)
self.utxo_flush_count = 0
self.wall_time = 0
self.first_sync = True
else:
state = ast.literal_eval(state.decode())
if not isinstance(state, dict):
raise self.DBError('failed reading state from DB')
self.db_version = state['db_version']
if self.db_version not in self.DB_VERSIONS:
raise self.DBError('your DB version is {} but this software '
'only handles versions {}'
.format(self.db_version, self.DB_VERSIONS))
# backwards compat
genesis_hash = state['genesis']
if isinstance(genesis_hash, bytes):
genesis_hash = genesis_hash.decode()
if genesis_hash != self.coin.GENESIS_HASH:
raise self.DBError('DB genesis hash {} does not match coin {}'
.format(state['genesis_hash'],
self.coin.GENESIS_HASH))
self.db_height = state['height']
self.db_tx_count = state['tx_count']
self.db_tip = state['tip']
self.utxo_flush_count = state['utxo_flush_count']
self.wall_time = state['wall_time']
self.first_sync = state['first_sync']
def write_utxo_state(self, batch):
'''Write (UTXO) state to the batch.'''
state = {
'genesis': self.coin.GENESIS_HASH,
'height': self.db_height,
'tx_count': self.db_tx_count,
'tip': self.db_tip,
'utxo_flush_count': self.utxo_flush_count,
'wall_time': self.wall_time,
'first_sync': self.first_sync,
'db_version': self.db_version,
}
batch.put(b'state', repr(state).encode())
def get_balance(self, hashX):
'''Returns the confirmed balance of an address.'''
return sum(utxo.value for utxo in self.get_utxos(hashX, limit=None))
@ -277,24 +361,6 @@ class DB(util.LoggedClass):
tx_hash, height = self.fs_tx_hash(tx_num)
yield UTXO(tx_num, tx_pos, tx_hash, height, value)
def db_hashX(self, tx_hash, idx_packed):
'''Return (hashX, tx_num_packed) for the given TXO.
Both are None if not found.'''
# Key: b'h' + compressed_tx_hash + tx_idx + tx_num
# Value: hashX
prefix = b'h' + tx_hash[:4] + idx_packed
# Find which entry, if any, the TX_HASH matches.
for db_key, hashX in self.utxo_db.iterator(prefix=prefix):
tx_num_packed = db_key[-4:]
tx_num, = unpack('<I', tx_num_packed)
hash, height = self.fs_tx_hash(tx_num)
if hash == tx_hash:
return hashX, tx_num_packed
return None, None
def db_utxo_lookup(self, tx_hash, tx_idx):
'''Given a prevout return a (hashX, value) pair.
@ -302,7 +368,7 @@ class DB(util.LoggedClass):
mempool code.
'''
idx_packed = pack('<H', tx_idx)
hashX, tx_num_packed = self.db_hashX(tx_hash, idx_packed)
hashX, tx_num_packed = self._db_hashX(tx_hash, idx_packed)
if not hashX:
# This can happen when the daemon is a block ahead of us
# and has mempool txs spending outputs from that new block
@ -318,42 +384,23 @@ class DB(util.LoggedClass):
value, = unpack('<Q', db_value)
return hashX, value
# -- Undo information
def min_undo_height(self, max_height):
'''Returns a height from which we should store undo info.'''
return max_height - self.env.reorg_limit + 1
def undo_key(self, height):
'''DB key for undo information at the given height.'''
return b'U' + pack('>I', height)
def read_undo_info(self, height):
'''Read undo information from a file for the current height.'''
return self.utxo_db.get(self.undo_key(height))
def _db_hashX(self, tx_hash, idx_packed):
'''Return (hashX, tx_num_packed) for the given TXO.
def flush_undo_infos(self, batch_put, undo_infos):
'''undo_infos is a list of (undo_info, height) pairs.'''
for undo_info, height in undo_infos:
batch_put(self.undo_key(height), b''.join(undo_info))
Both are None if not found.'''
# Key: b'h' + compressed_tx_hash + tx_idx + tx_num
# Value: hashX
prefix = b'h' + tx_hash[:4] + idx_packed
def clear_excess_undo_info(self):
'''Clear excess undo info. Only most recent N are kept.'''
prefix = b'U'
min_height = self.min_undo_height(self.db_height)
keys = []
for key, hist in self.utxo_db.iterator(prefix=prefix):
height, = unpack('>I', key[-4:])
if height >= min_height:
break
keys.append(key)
# Find which entry, if any, the TX_HASH matches.
for db_key, hashX in self.utxo_db.iterator(prefix=prefix):
tx_num_packed = db_key[-4:]
tx_num, = unpack('<I', tx_num_packed)
hash, height = self.fs_tx_hash(tx_num)
if hash == tx_hash:
return hashX, tx_num_packed
if keys:
with self.utxo_db.write_batch() as batch:
for key in keys:
batch.delete(key)
self.logger.info('deleted {:,d} stale undo entries'
.format(len(keys)))
return None, None
# -- History database
@ -378,7 +425,12 @@ class DB(util.LoggedClass):
self.logger.info('deleted excess history entries')
def write_history_state(self, batch):
state = {'flush_count': self.flush_count}
'''Write state to hist_db.'''
state = {
'flush_count': self.flush_count,
'comp_flush_count': self.comp_flush_count,
'comp_cursor': self.comp_cursor,
}
# History entries are not prefixed; the suffix \0\0 ensures we
# look similar to other entries and aren't interfered with
batch.put(b'state\0\0', repr(state).encode())
@ -390,8 +442,12 @@ class DB(util.LoggedClass):
if not isinstance(state, dict):
raise self.DBError('failed reading state from history DB')
self.flush_count = state['flush_count']
self.comp_flush_count = state.get('comp_flush_count', -1)
self.comp_cursor = state.get('comp_cursor', -1)
else:
self.flush_count = 0
self.comp_flush_count = -1
self.comp_cursor = -1
def flush_history(self, history):
self.flush_count += 1
@ -432,13 +488,11 @@ class DB(util.LoggedClass):
return nremoves
def get_history(self, hashX, limit=1000):
'''Generator that returns an unpruned, sorted list of (tx_hash,
height) tuples of confirmed transactions that touched the address,
earliest in the blockchain first. Includes both spending and
receiving transactions. By default yields at most 1000 entries.
Set limit to None to get them all.
'''
def get_history_txnums(self, hashX, limit=1000):
'''Generator that returns an unpruned, sorted list of tx_nums in the
history of a hashX. Includes both spending and receiving
transactions. By default yields at most 1000 entries. Set
limit to None to get them all. '''
limit = self._resolve_limit(limit)
for key, hist in self.hist_db.iterator(prefix=hashX):
a = array.array('I')
@ -446,5 +500,174 @@ class DB(util.LoggedClass):
for tx_num in a:
if limit == 0:
return
yield self.fs_tx_hash(tx_num)
yield tx_num
limit -= 1
def get_history(self, hashX, limit=1000):
'''Generator that returns an unpruned, sorted list of (tx_hash,
height) tuples of confirmed transactions that touched the address,
earliest in the blockchain first. Includes both spending and
receiving transactions. By default yields at most 1000 entries.
Set limit to None to get them all.
'''
for tx_num in self.get_history_txnums(hashX, limit):
yield self.fs_tx_hash(tx_num)
#
# History compaction
#
# comp_cursor is a cursor into compaction progress.
# -1: no compaction in progress
# 0-65535: Compaction in progress; all prefixes < comp_cursor have
# been compacted, and later ones have not.
# 65536: compaction complete in-memory but not flushed
#
# comp_flush_count applies during compaction, and is a flush count
# for history with prefix < comp_cursor. flush_count applies
# to still uncompacted history. It is -1 when no compaction is
# taking place. Key suffixes up to and including comp_flush_count
# are used, so a parallel history flush must first increment this
#
# When compaction is complete and the final flush takes place,
# flush_count is reset to comp_flush_count, and comp_flush_count to -1
def _flush_compaction(self, cursor, write_items, keys_to_delete):
'''Flush a single compaction pass as a batch.'''
# Update compaction state
if cursor == 65536:
self.flush_count = self.comp_flush_count
self.comp_cursor = -1
self.comp_flush_count = -1
else:
self.comp_cursor = cursor
# History DB. Flush compacted history and updated state
with self.hist_db.write_batch() as batch:
# Important: delete first! The keyspace may overlap.
for key in keys_to_delete:
batch.delete(key)
for key, value in write_items:
batch.put(key, value)
self.write_history_state(batch)
# If compaction was completed also update the UTXO flush count
if cursor == 65536:
self.utxo_flush_count = self.flush_count
with self.utxo_db.write_batch() as batch:
self.write_utxo_state(batch)
def _compact_hashX(self, hashX, hist_map, hist_list,
write_items, keys_to_delete):
'''Compress the history for a hashX. hist_list is an ordered list of
the histories to be compressed.'''
# History entries (tx numbers) are 4 bytes each. Distribute
# over rows of up to 50KB in size. A fixed row size means
# future compactions will not need to update the first N - 1
# rows.
max_row_size = self.max_hist_row_entries * 4
full_hist = b''.join(hist_list)
nrows = (len(full_hist) + max_row_size - 1) // max_row_size
if nrows > 4:
self.log_info('hashX {} is large: {:,d} entries across {:,d} rows'
.format(hash_to_str(hashX), len(full_hist) // 4,
nrows))
# Find what history needs to be written, and what keys need to
# be deleted. Start by assuming all keys are to be deleted,
# and then remove those that are the same on-disk as when
# compacted.
write_size = 0
keys_to_delete.update(hist_map)
for n, chunk in enumerate(util.chunks(full_hist, max_row_size)):
key = hashX + pack('>H', n)
if hist_map.get(key) == chunk:
keys_to_delete.remove(key)
else:
write_items.append((key, chunk))
write_size += len(chunk)
assert n + 1 == nrows
self.comp_flush_count = max(self.comp_flush_count, n)
return write_size
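A standalone sketch of the fixed-size row split performed above (hypothetical helper; an 11-byte hashX is assumed, matching HASHX_LEN):

from struct import pack

MAX_ROW_ENTRIES = 12500                       # as max_hist_row_entries above

def split_history_rows(hashX, tx_nums):
    '''Split tx numbers into (key, row) pairs of at most 50KB each.'''
    full_hist = b''.join(pack('<I', n) for n in tx_nums)
    size = MAX_ROW_ENTRIES * 4
    return [(hashX + pack('>H', i), full_hist[o:o + size])
            for i, o in enumerate(range(0, len(full_hist), size))]

rows = split_history_rows(b'\x01' * 11, range(30000))
assert len(rows) == 3                         # two full rows + one partial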
def _compact_prefix(self, prefix, write_items, keys_to_delete):
'''Compact all history entries for hashXs beginning with the
given prefix. Update keys_to_delete and write.'''
prior_hashX = None
hist_map = {}
hist_list = []
key_len = self.coin.HASHX_LEN + 2
write_size = 0
for key, hist in self.hist_db.iterator(prefix=prefix):
# Ignore non-history entries
if len(key) != key_len:
continue
hashX = key[:-2]
if hashX != prior_hashX and prior_hashX:
write_size += self._compact_hashX(prior_hashX, hist_map,
hist_list, write_items,
keys_to_delete)
hist_map.clear()
hist_list.clear()
prior_hashX = hashX
hist_map[key] = hist
hist_list.append(hist)
if prior_hashX:
write_size += self._compact_hashX(prior_hashX, hist_map, hist_list,
write_items, keys_to_delete)
return write_size
def _compact_history(self, limit):
'''Inner loop of history compaction. Loops until limit bytes have
been processed.
'''
keys_to_delete = set()
write_items = [] # A list of (key, value) pairs
write_size = 0
# Loop over 2-byte prefixes
cursor = self.comp_cursor
while write_size < limit and cursor < 65536:
prefix = pack('>H', cursor)
write_size += self._compact_prefix(prefix, write_items,
keys_to_delete)
cursor += 1
max_rows = self.comp_flush_count + 1
self._flush_compaction(cursor, write_items, keys_to_delete)
self.log_info('history compaction: wrote {:,d} rows ({:.1f} MB), '
'removed {:,d} rows, largest: {:,d}, {:.1f}% complete'
.format(len(write_items), write_size / 1000000,
len(keys_to_delete), max_rows,
100 * cursor / 65536))
return write_size
async def compact_history(self, loop):
'''Start a background history compaction and reset the flush count if
it's getting high.
'''
# Do nothing if during initial sync or if a compaction hasn't
# been initiated
if self.first_sync or self.comp_cursor == -1:
return
self.comp_flush_count = max(self.comp_flush_count, 1)
limit = 50 * 1000 * 1000
while self.comp_cursor != -1:
if self.semaphore.locked():
self.log_info('compact_history: waiting on semaphore...')
with await self.semaphore:
await loop.run_in_executor(None, self._compact_history, limit)
def cancel_history_compaction(self):
if self.comp_cursor != -1:
self.logger.warning('cancelling in-progress history compaction')
self.comp_flush_count = -1
self.comp_cursor = -1

124
server/env.py

@ -8,17 +8,19 @@
'''Class for handling environment configuration and defaults.'''
import resource
from collections import namedtuple
from ipaddress import ip_address
from os import environ
from lib.coins import Coin
from lib.util import LoggedClass
import lib.util as lib_util
NetIdentity = namedtuple('NetIdentity', 'host tcp_port ssl_port nick_suffix')
class Env(LoggedClass):
class Env(lib_util.LoggedClass):
'''Wraps environment configuration.'''
class Error(Exception):
@ -27,14 +29,14 @@ class Env(LoggedClass):
def __init__(self):
super().__init__()
self.obsolete(['UTXO_MB', 'HIST_MB', 'NETWORK'])
self.db_dir = self.required('DB_DIRECTORY')
self.daemon_url = self.required('DAEMON_URL')
coin_name = self.default('COIN', 'Bitcoin')
network = self.default('NET', 'mainnet')
self.coin = Coin.lookup_coin_class(coin_name, network)
self.db_dir = self.required('DB_DIRECTORY')
self.cache_MB = self.integer('CACHE_MB', 1200)
self.host = self.default('HOST', 'localhost')
self.reorg_limit = self.integer('REORG_LIMIT', self.coin.REORG_LIMIT)
self.daemon_url = self.required('DAEMON_URL')
# Server stuff
self.tcp_port = self.integer('TCP_PORT', None)
self.ssl_port = self.integer('SSL_PORT', None)
@ -46,11 +48,12 @@ class Env(LoggedClass):
self.banner_file = self.default('BANNER_FILE', None)
self.tor_banner_file = self.default('TOR_BANNER_FILE',
self.banner_file)
self.anon_logs = self.default('ANON_LOGS', False)
self.anon_logs = self.boolean('ANON_LOGS', False)
self.log_sessions = self.integer('LOG_SESSIONS', 3600)
# Peer discovery
self.peer_discovery = bool(self.default('PEER_DISCOVERY', True))
self.peer_announce = bool(self.default('PEER_ANNOUNCE', True))
self.peer_discovery = self.boolean('PEER_DISCOVERY', True)
self.peer_announce = self.boolean('PEER_ANNOUNCE', True)
self.force_proxy = self.boolean('FORCE_PROXY', False)
self.tor_proxy_host = self.default('TOR_PROXY_HOST', 'localhost')
self.tor_proxy_port = self.integer('TOR_PROXY_PORT', None)
# The electrum client takes the empty string as unspecified
@ -59,45 +62,27 @@ class Env(LoggedClass):
# Server limits to help prevent DoS
self.max_send = self.integer('MAX_SEND', 1000000)
self.max_subs = self.integer('MAX_SUBS', 250000)
self.max_sessions = self.integer('MAX_SESSIONS', 1000)
self.max_sessions = self.sane_max_sessions()
self.max_session_subs = self.integer('MAX_SESSION_SUBS', 50000)
self.bandwidth_limit = self.integer('BANDWIDTH_LIMIT', 2000000)
self.session_timeout = self.integer('SESSION_TIMEOUT', 600)
# IRC
self.irc = self.default('IRC', False)
self.irc = self.boolean('IRC', False)
self.irc_nick = self.default('IRC_NICK', None)
# Identities
main_identity = NetIdentity(
self.default('REPORT_HOST', self.host),
self.integer('REPORT_TCP_PORT', self.tcp_port) or None,
self.integer('REPORT_SSL_PORT', self.ssl_port) or None,
''
)
if not main_identity.host.strip():
raise self.Error('IRC host is empty')
if main_identity.tcp_port == main_identity.ssl_port:
raise self.Error('IRC TCP and SSL ports are the same')
self.identities = [main_identity]
tor_host = self.default('REPORT_HOST_TOR', '')
if tor_host.endswith('.onion'):
self.identities.append(NetIdentity(
tor_host,
self.integer('REPORT_TCP_PORT_TOR',
main_identity.tcp_port
if main_identity.tcp_port else
self.tcp_port) or None,
self.integer('REPORT_SSL_PORT_TOR',
main_identity.ssl_port
if main_identity.ssl_port else
self.ssl_port) or None,
'_tor',
))
clearnet_identity = self.clearnet_identity()
tor_identity = self.tor_identity(clearnet_identity)
self.identities = [identity
for identity in (clearnet_identity, tor_identity)
if identity is not None]
def default(self, envvar, default):
return environ.get(envvar, default)
def boolean(self, envvar, default):
return bool(self.default(envvar, default))
def required(self, envvar):
value = environ.get(envvar)
if value is None:
@ -114,8 +99,77 @@ class Env(LoggedClass):
raise self.Error('cannot convert envvar {} value {} to an integer'
.format(envvar, value))
def sane_max_sessions(self):
'''Return the maximum number of sessions to permit. Normally this
is MAX_SESSIONS. However, to prevent open file exhaustion, adjust
downwards if running with a small open file rlimit.'''
env_value = self.integer('MAX_SESSIONS', 1000)
nofile_limit = resource.getrlimit(resource.RLIMIT_NOFILE)[0]
# We give the DB 250 files; allow ElectrumX 100 for itself
value = max(0, min(env_value, nofile_limit - 350))
if value < env_value:
self.log_warning('lowered maximum sessions from {:,d} to {:,d} '
'because your open file limit is {:,d}'
.format(env_value, value, nofile_limit))
return value
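Illustrating the arithmetic (a sketch; the outcome depends on your rlimit — with the common soft limit of 1024 files the default cap of 1000 drops to 674):

import resource

soft, _hard = resource.getrlimit(resource.RLIMIT_NOFILE)
# 250 files are reserved for the DB and 100 for ElectrumX itself
print('effective session cap:', max(0, min(1000, soft - 350)))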
def obsolete(self, envvars):
bad = [envvar for envvar in envvars if environ.get(envvar)]
if bad:
raise self.Error('remove obsolete environment variables {}'
.format(bad))
def clearnet_identity(self):
host = self.default('REPORT_HOST', None)
if host is None:
return None
try:
ip = ip_address(host)
except ValueError:
bad = (not lib_util.is_valid_hostname(host)
or host.lower() == 'localhost')
else:
bad = (ip.is_multicast or ip.is_unspecified
or (ip.is_private and (self.irc or self.peer_announce)))
if bad:
raise self.Error('"{}" is not a valid REPORT_HOST'.format(host))
tcp_port = self.integer('REPORT_TCP_PORT', self.tcp_port) or None
ssl_port = self.integer('REPORT_SSL_PORT', self.ssl_port) or None
if tcp_port == ssl_port:
raise self.Error('REPORT_TCP_PORT and REPORT_SSL_PORT '
'both resolve to {}'.format(tcp_port))
return NetIdentity(
host,
tcp_port,
ssl_port,
''
)
def tor_identity(self, clearnet):
host = self.default('REPORT_HOST_TOR', None)
if host is None:
return None
if not host.endswith('.onion'):
raise self.Error('tor host "{}" must end with ".onion"'
.format(host))
def port(port_kind):
'''Returns the clearnet identity port, if any and not zero,
otherwise the listening port.'''
result = 0
if clearnet:
result = getattr(clearnet, port_kind)
return result or getattr(self, port_kind)
tcp_port = self.integer('REPORT_TCP_PORT_TOR', port('tcp_port')) or None
ssl_port = self.integer('REPORT_SSL_PORT_TOR', port('ssl_port')) or None
if tcp_port == ssl_port:
raise self.Error('REPORT_TCP_PORT_TOR and REPORT_SSL_PORT_TOR '
'both resolve to {}'.format(tcp_port))
return NetIdentity(
host,
tcp_port,
ssl_port,
'_tor',
)

29
server/mempool.py

@ -37,6 +37,8 @@ class MemPool(util.LoggedClass):
self.controller = controller
self.coin = bp.coin
self.db = bp
self.hashes = set()
self.mempool_refresh_event = asyncio.Event()
self.touched = bp.touched
self.touched_event = asyncio.Event()
self.prioritized = set()
@ -49,6 +51,11 @@ class MemPool(util.LoggedClass):
initial mempool sync.'''
self.prioritized.add(tx_hash)
def set_hashes(self, hashes):
'''Save the list of mempool hashes.'''
self.hashes = set(hashes)
self.mempool_refresh_event.set()
def resync_daemon_hashes(self, unprocessed, unfetched):
'''Re-sync self.txs with the list of hashes in the daemon's mempool.
@ -59,8 +66,7 @@ class MemPool(util.LoggedClass):
hashXs = self.hashXs
touched = self.touched
hashes = self.daemon.cached_mempool_hashes()
gone = set(txs).difference(hashes)
gone = set(txs).difference(self.hashes)
for hex_hash in gone:
unfetched.discard(hex_hash)
unprocessed.pop(hex_hash, None)
@ -75,7 +81,7 @@ class MemPool(util.LoggedClass):
del hashXs[hashX]
touched.update(tx_hashXs)
new = hashes.difference(txs)
new = self.hashes.difference(txs)
unfetched.update(new)
for hex_hash in new:
txs[hex_hash] = None
@ -92,15 +98,14 @@ class MemPool(util.LoggedClass):
fetch_size = 800
process_some = self.async_process_some(unfetched, fetch_size // 2)
await self.daemon.mempool_refresh_event.wait()
await self.mempool_refresh_event.wait()
self.logger.info('beginning processing of daemon mempool. '
'This can take some time...')
next_log = 0
loops = -1 # Zero during initial catchup
while True:
# Avoid double notifications if processing a block
if self.touched and not self.processing_new_block():
if self.touched:
self.touched_event.set()
# Log progress / state
@ -120,10 +125,10 @@ class MemPool(util.LoggedClass):
try:
if not todo:
self.prioritized.clear()
await self.daemon.mempool_refresh_event.wait()
await self.mempool_refresh_event.wait()
self.resync_daemon_hashes(unprocessed, unfetched)
self.daemon.mempool_refresh_event.clear()
self.mempool_refresh_event.clear()
if unfetched:
count = min(len(unfetched), fetch_size)
@ -177,10 +182,6 @@ class MemPool(util.LoggedClass):
return process
def processing_new_block(self):
'''Return True if we're processing a new block.'''
return self.daemon.cached_height() > self.db.db_height
async def fetch_raw_txs(self, hex_hashes):
'''Fetch a list of mempool transactions.'''
raw_txs = await self.daemon.getrawtransactions(hex_hashes)
@ -198,7 +199,7 @@ class MemPool(util.LoggedClass):
not depend on the result remaining the same are fine.
'''
script_hashX = self.coin.hashX_from_script
deserializer = self.coin.deserializer()
deserializer = self.coin.DESERIALIZER
db_utxo_lookup = self.db.db_utxo_lookup
txs = self.txs
@ -270,7 +271,7 @@ class MemPool(util.LoggedClass):
if hashX not in self.hashXs:
return []
deserializer = self.coin.deserializer()
deserializer = self.coin.DESERIALIZER
hex_hashes = self.hashXs[hashX]
raw_txs = await self.daemon.getrawtransactions(hex_hashes)
result = []
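For context, the new set_hashes() handshake between the daemon poller and the mempool loop is a producer/consumer pattern around an asyncio.Event; a minimal sketch with hypothetical names:

import asyncio

class MiniMempool:
    def __init__(self):
        self.hashes = set()
        self.refresh_event = asyncio.Event()

    def set_hashes(self, hashes):
        # Producer side: the daemon poller saves the latest hash set
        self.hashes = set(hashes)
        self.refresh_event.set()

    async def run(self):
        # Consumer side: wake on each refresh and resync
        while True:
            await self.refresh_event.wait()
            self.refresh_event.clear()
            # ... resync transaction state against self.hashes here ...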

310
server/peers.py

@ -10,6 +10,7 @@
import ast
import asyncio
import random
import socket
import ssl
import time
from collections import defaultdict, Counter
@ -56,6 +57,8 @@ class PeerSession(JSONSession):
self.peer_mgr = peer_mgr
self.kind = kind
self.failed = False
self.bad = False
self.remote_peers = None
self.log_prefix = '[{}] '.format(self.peer)
async def wait_on_items(self):
@ -80,6 +83,8 @@ class PeerSession(JSONSession):
self.send_request(self.on_version, 'server.version',
[version.VERSION, proto_ver])
self.send_request(self.on_features, 'server.features')
self.send_request(self.on_headers, 'blockchain.headers.subscribe')
self.send_request(self.on_peers_subscribe, 'server.peers.subscribe')
def connection_lost(self, exc):
'''Handle disconnection.'''
@ -92,71 +97,30 @@ class PeerSession(JSONSession):
self.failed = True
self.log_error('server.peers.subscribe: {}'.format(error))
else:
self.check_remote_peers(result)
# Save for later analysis
self.remote_peers = result
self.close_if_done()
def check_remote_peers(self, updates):
'''When a peer gives us a peer update.
Each update is expected to be of the form:
[ip_addr, hostname, ['v1.0', 't51001', 's51002']]
'''
try:
real_names = [' '.join([u[1]] + u[2]) for u in updates]
peers = [Peer.from_real_name(real_name, str(self.peer))
for real_name in real_names]
except Exception:
self.log_error('bad server.peers.subscribe response')
return
self.peer_mgr.add_peers(peers)
# Announce ourself if not present. Don't if disabled or we
# are a non-public IP address.
if not self.peer_mgr.env.peer_announce:
return
my = self.peer_mgr.my_clearnet_peer()
if not my.is_public:
return
for peer in my.matches(peers):
if peer.tcp_port == my.tcp_port and peer.ssl_port == my.ssl_port:
return
self.log_info('registering ourself with server.add_peer')
self.send_request(self.on_add_peer, 'server.add_peer', [my.features])
def on_add_peer(self, result, error):
'''Handle the response to the add_peer message.'''
self.close_if_done()
def peer_verified(self, is_good):
'''Call when it has been determined whether or not the peer seems to
be on the same network.
'''
if is_good:
self.send_request(self.on_peers_subscribe,
'server.peers.subscribe')
else:
self.peer.mark_bad()
self.failed = True
'''We got a response to the add_peer message.'''
# This is the last thing we were waiting for; shutdown the connection
self.shutdown_connection()
def on_features(self, features, error):
# Several peers don't implement this. If they do, check they are
# the same network with the genesis hash.
verified = False
if not error and isinstance(features, dict):
hosts = [host.lower() for host in features.get('hosts', {})]
our_hash = self.peer_mgr.env.coin.GENESIS_HASH
if our_hash != features.get('genesis_hash'):
self.peer_verified(False)
self.bad = True
self.log_warning('incorrect genesis hash')
else:
self.peer_verified(True)
elif self.peer.host.lower() in hosts:
self.peer.update_features(features)
verified = True
# For legacy peers not implementing features, check their height
# as a proxy to determining they're on our network
if not verified and not self.peer.bad:
self.send_request(self.on_headers, 'blockchain.headers.subscribe')
else:
self.bad = True
self.log_warning('ignoring - not listed in host list {}'
.format(hosts))
self.close_if_done()
def on_headers(self, result, error):
@ -165,16 +129,18 @@ class PeerSession(JSONSession):
self.failed = True
self.log_error('blockchain.headers.subscribe returned an error')
elif not isinstance(result, dict):
self.bad = True
self.log_error('bad blockchain.headers.subscribe response')
self.peer_verified(False)
else:
our_height = self.peer_mgr.controller.bp.db_height
their_height = result.get('block_height')
is_good = (isinstance(their_height, int) and
abs(our_height - their_height) <= 5)
self.peer_verified(is_good)
if not is_good:
self.log_warning('bad height {}'.format(their_height))
if not isinstance(their_height, int):
self.log_warning('invalid height {}'.format(their_height))
self.bad = True
elif abs(our_height - their_height) > 5:
self.log_warning('bad height {:,d} (ours: {:,d})'
.format(their_height, our_height))
self.bad = True
self.close_if_done()
def on_version(self, result, error):
@ -187,19 +153,54 @@ class PeerSession(JSONSession):
self.peer.features['server_version'] = result
self.close_if_done()
def check_remote_peers(self):
'''Check the peers list we got from a remote peer.
Each update is expected to be of the form:
[ip_addr, hostname, ['v1.0', 't51001', 's51002']]
Call add_peer if the remote doesn't appear to know about us.
'''
try:
real_names = [' '.join([u[1]] + u[2]) for u in self.remote_peers]
peers = [Peer.from_real_name(real_name, str(self.peer))
for real_name in real_names]
except Exception:
self.log_error('bad server.peers.subscribe response')
return
self.peer_mgr.add_peers(peers)
# Announce ourself if not present. Don't if announcing is disabled,
# we are a non-public IP address, or the peer is ourselves.
if not self.peer_mgr.env.peer_announce:
return
if self.peer in self.peer_mgr.myselves:
return
my = self.peer_mgr.my_clearnet_peer()
if not my or not my.is_public:
return
for peer in my.matches(peers):
if peer.tcp_port == my.tcp_port and peer.ssl_port == my.ssl_port:
return
self.log_info('registering ourself with server.add_peer')
self.send_request(self.on_add_peer, 'server.add_peer', [my.features])
def close_if_done(self):
if not self.has_pending_requests():
is_good = not self.failed
self.peer_mgr.set_connection_status(self.peer, is_good)
if self.peer.is_tor:
how = 'via {} over Tor'.format(self.kind)
else:
how = 'via {} at {}'.format(self.kind,
self.peer_addr(anon=False))
status = 'verified' if is_good else 'failed to verify'
elapsed = time.time() - self.peer.last_try
self.log_info('{} {} in {:.1f}s'.format(status, how, elapsed))
self.close_connection()
if self.bad:
self.peer.mark_bad()
elif self.remote_peers:
self.check_remote_peers()
# We might now be waiting for an add_peer response
if not self.has_pending_requests():
self.shutdown_connection()
def shutdown_connection(self):
is_good = not (self.failed or self.bad)
self.peer_mgr.set_verification_status(self.peer, self.kind, is_good)
self.close_connection()
class PeerManager(util.LoggedClass):
@ -215,7 +216,10 @@ class PeerManager(util.LoggedClass):
self.env = env
self.controller = controller
self.loop = controller.loop
self.irc = IRC(env, self)
if env.irc and env.coin.IRC_PREFIX:
self.irc = IRC(env, self)
else:
self.irc = None
self.myselves = peers_from_env(env)
self.retry_event = asyncio.Event()
# Peers have one entry per hostname. Once connected, the
@ -224,14 +228,15 @@ class PeerManager(util.LoggedClass):
# any other peers with the same host name or IP address.
self.peers = set()
self.onion_peers = []
self.last_tor_retry_time = 0
self.tor_proxy = SocksProxy(env.tor_proxy_host, env.tor_proxy_port,
loop=self.loop)
self.permit_onion_peer_time = time.time()
self.proxy = SocksProxy(env.tor_proxy_host, env.tor_proxy_port,
loop=self.loop)
self.import_peers()
def my_clearnet_peer(self):
'''Returns the clearnet peer representing this server.'''
return [peer for peer in self.myselves if not peer.is_tor][0]
'''Returns the clearnet peer representing this server, if any.'''
clearnet = [peer for peer in self.myselves if not peer.is_tor]
return clearnet[0] if clearnet else None
def info(self):
'''The number of peers.'''
@ -251,9 +256,9 @@ class PeerManager(util.LoggedClass):
for peer in self.peers:
if peer.bad:
peer.status = PEER_BAD
elif peer.last_connect > cutoff:
elif peer.last_good > cutoff:
peer.status = PEER_GOOD
elif peer.last_connect:
elif peer.last_good:
peer.status = PEER_STALE
else:
peer.status = PEER_NEVER
@ -269,16 +274,16 @@ class PeerManager(util.LoggedClass):
return data
def peer_key(peer):
return (peer.bad, -peer.last_connect)
return (peer.bad, -peer.last_good)
return [peer_data(peer) for peer in sorted(self.peers, key=peer_key)]
def add_peers(self, peers, limit=3, check_ports=False, source=None):
def add_peers(self, peers, limit=2, check_ports=False, source=None):
'''Add a limited number of peers that are not already present.'''
retry = False
new_peers = []
for peer in peers:
if not peer.is_valid:
if not peer.is_public:
continue
matches = peer.matches(self.peers)
if not matches:
@ -297,23 +302,59 @@ class PeerManager(util.LoggedClass):
use_peers = new_peers[:limit]
else:
use_peers = new_peers
self.logger.info('accepted {:d}/{:d} new peers of {:d} from {}'
.format(len(use_peers), len(new_peers),
len(peers), source))
for n, peer in enumerate(use_peers):
self.logger.info('accepted new peer {:d}/{:d} {} from {} '
.format(n + 1, len(use_peers), peer, source))
self.peers.update(use_peers)
if retry:
self.retry_event.set()
def on_add_peer(self, features, source):
'''Add peers from an incoming connection.'''
def permit_new_onion_peer(self):
'''Accept a new onion peer only once per random time interval.'''
now = time.time()
if now < self.permit_onion_peer_time:
return False
self.permit_onion_peer_time = now + random.randrange(0, 1200)
return True
async def on_add_peer(self, features, source_info):
'''Add a peer (but only if the peer resolves to the source).'''
if not source_info:
self.log_info('ignored add_peer request: no source info')
return False
source = source_info[0]
peers = Peer.peers_from_features(features, source)
if peers:
hosts = [peer.host for peer in peers]
self.log_info('add_peer request from {} for {}'
.format(source, ', '.join(hosts)))
self.add_peers(peers, check_ports=True)
return bool(peers)
if not peers:
self.log_info('ignored add_peer request: no peers given')
return False
# Just look at the first peer, require it
peer = peers[0]
host = peer.host
if peer.is_tor:
permit = self.permit_new_onion_peer()
reason = 'rate limiting'
else:
try:
infos = await self.loop.getaddrinfo(host, 80,
type=socket.SOCK_STREAM)
except socket.gaierror:
permit = False
reason = 'address resolution failure'
else:
permit = any(source == info[-1][0] for info in infos)
reason = 'source-destination mismatch'
if permit:
self.log_info('accepted add_peer request from {} for {}'
.format(source, host))
self.add_peers([peer], check_ports=True)
else:
self.log_warning('rejected add_peer request from {} for {} ({})'
.format(source, host, reason))
return permit
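A standalone sketch of the source-verification idea above (illustrative, not the exact server code): resolve the advertised host and accept the add_peer request only if one of its addresses matches the requesting IP:

import asyncio
import socket

async def source_matches_host(source_ip, host):
    loop = asyncio.get_event_loop()
    # Resolve the advertised host; a resolution failure means rejection
    try:
        infos = await loop.getaddrinfo(host, 80, type=socket.SOCK_STREAM)
    except socket.gaierror:
        return False
    # Each info is (family, type, proto, canonname, sockaddr);
    # sockaddr[0] is the resolved IP address
    return any(source_ip == info[-1][0] for info in infos)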
def on_peers_subscribe(self, is_tor):
'''Returns the server peers as a list of (ip, host, details) tuples.
@ -324,13 +365,13 @@ class PeerManager(util.LoggedClass):
'''
cutoff = time.time() - STALE_SECS
recent = [peer for peer in self.peers
if peer.last_connect > cutoff and
if peer.last_good > cutoff and
not peer.bad and peer.is_public]
onion_peers = []
# Always report ourselves if valid (even if not public)
peers = set(myself for myself in self.myselves
if myself.last_connect > cutoff)
if myself.last_good > cutoff)
# Bucket the clearnet peers and select up to two from each
buckets = defaultdict(list)
@ -373,7 +414,14 @@ class PeerManager(util.LoggedClass):
if data:
version, items = ast.literal_eval(data)
if version == 1:
peers = [Peer.deserialize(item) for item in items]
peers = []
for item in items:
if 'last_connect' in item:
item['last_good'] = item.pop('last_connect')
try:
peers.append(Peer.deserialize(item))
except Exception:
pass
self.add_peers(peers, source='peers file', limit=None)
def import_peers(self):
@ -393,10 +441,12 @@ class PeerManager(util.LoggedClass):
def connect_to_irc(self):
'''Connect to IRC if not disabled.'''
if self.env.irc and self.env.coin.IRC_PREFIX:
if self.irc:
pairs = [(peer.real_name(), ident.nick_suffix) for peer, ident
in zip(self.myselves, self.env.identities)]
self.ensure_future(self.irc.start(pairs))
elif self.env.irc:
self.logger.info('IRC is disabled for this coin')
else:
self.logger.info('IRC is disabled')
@ -421,7 +471,14 @@ class PeerManager(util.LoggedClass):
self.logger.info('peer discovery is disabled')
return
self.logger.info('beginning peer discovery')
# Wait a few seconds after starting the proxy detection loop
# for proxy detection to succeed
self.ensure_future(self.proxy.auto_detect_loop())
await self.proxy.tried_event.wait()
self.logger.info('beginning peer discovery; force use of proxy: {}'
.format(self.env.force_proxy))
try:
while True:
timeout = self.loop.call_later(WAKEUP_SECS,
@ -445,29 +502,17 @@ class PeerManager(util.LoggedClass):
nearly_stale_time = (now - STALE_SECS) + WAKEUP_SECS * 2
def should_retry(peer):
# Try some Tor at startup to determine the proxy so we can
# serve the right banner file
if self.tor_proxy.port is None and self.is_coin_onion_peer(peer):
return True
# Retry a peer whose ports might have updated
if peer.other_port_pairs:
return True
# Retry a good connection if it is about to turn stale
if peer.try_count == 0:
return peer.last_connect < nearly_stale_time
return peer.last_good < nearly_stale_time
# Retry a failed connection if enough time has passed
return peer.last_try < now - WAKEUP_SECS * 2 ** peer.try_count
peers = [peer for peer in self.peers if should_retry(peer)]
# If we don't have a tor proxy drop tor peers, but retry
# occasionally
if self.tor_proxy.port is None:
if now < self.last_tor_retry_time + 3600:
peers = [peer for peer in peers if not peer.is_tor]
elif any(peer.is_tor for peer in peers):
self.last_tor_retry_time = now
for peer in peers:
peer.try_count += 1
pairs = peer.connection_port_pairs()
@ -481,8 +526,11 @@ class PeerManager(util.LoggedClass):
kind, port = port_pairs[0]
sslc = ssl.SSLContext(ssl.PROTOCOL_TLS) if kind == 'SSL' else None
if peer.is_tor:
create_connection = self.tor_proxy.create_connection
if self.env.force_proxy or peer.is_tor:
# Only attempt a proxy connection if the proxy is up
if not self.proxy.is_up():
return
create_connection = self.proxy.create_connection
else:
create_connection = self.loop.create_connection
@ -508,28 +556,42 @@ class PeerManager(util.LoggedClass):
if port_pairs:
self.retry_peer(peer, port_pairs)
else:
self.set_connection_status(peer, False)
self.maybe_forget_peer(peer)
def set_verification_status(self, peer, kind, good):
'''Called when a verification succeeded or failed.'''
now = time.time()
if self.env.force_proxy or peer.is_tor:
how = 'via {} over Tor'.format(kind)
else:
how = 'via {} at {}'.format(kind, peer.ip_addr)
status = 'verified' if good else 'failed to verify'
elapsed = now - peer.last_try
self.log_info('{} {} {} in {:.1f}s'.format(status, peer, how, elapsed))
def set_connection_status(self, peer, good):
'''Called when a connection succeeded or failed.'''
if good:
peer.try_count = 0
peer.last_connect = time.time()
peer.last_good = now
peer.source = 'peer'
# Remove matching IP addresses
for match in peer.matches(self.peers):
if match != peer and peer.host == peer.ip_addr:
self.peers.remove(match)
# At most 2 matches if we're a host name, potentially several if
# we're an IP address (several instances can share a NAT).
matches = peer.matches(self.peers)
for match in matches:
if match.ip_address:
if len(matches) > 1:
self.peers.remove(match)
elif peer.host in match.features['hosts']:
match.update_features_from_peer(peer)
else:
self.maybe_forget_peer(peer)
def maybe_forget_peer(self, peer):
'''Forget the peer if appropriate, e.g. long-term unreachable.'''
if peer.bad:
forget = peer.last_connect < time.time() - STALE_SECS // 2
if peer.last_good and not peer.bad:
try_limit = 10
else:
try_limit = 10 if peer.last_connect else 3
forget = peer.try_count >= try_limit
try_limit = 3
forget = peer.try_count >= try_limit
if forget:
desc = 'bad' if peer.bad else 'unreachable'
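Restated as a hypothetical standalone helper mirroring the new policy: a peer that has connected successfully and is not marked bad gets ten retry attempts before being forgotten, any other peer only three:

def should_forget(peer):
    # Good, non-bad peers earn a longer retry budget
    if peer.last_good and not peer.bad:
        try_limit = 10
    else:
        try_limit = 3
    return peer.try_count >= try_limit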

104
server/session.py

@ -34,6 +34,7 @@ class SessionBase(JSONSession):
self.env = controller.env
self.daemon = self.bp.daemon
self.client = 'unknown'
self.client_version = (1, )
self.protocol_version = '1.0'
self.anon_logs = self.env.anon_logs
self.last_delay = 0
@ -45,7 +46,6 @@ class SessionBase(JSONSession):
self.bw_time = self.start_time
self.bw_interval = 3600
self.bw_used = 0
self.peer_added = False
def close_connection(self):
'''Call this to close the connection.'''
@ -194,15 +194,10 @@ class ElectrumX(SessionBase):
self.subscribe_height = True
return self.height()
def add_peer(self, features):
'''Add a peer.'''
if self.peer_added:
return False
async def add_peer(self, features):
'''Add a peer (but only if the peer resolves to the source).'''
peer_mgr = self.controller.peer_mgr
peer_info = self.peer_info()
source = peer_info[0] if peer_info else 'unknown'
self.peer_added = peer_mgr.on_add_peer(features, source)
return self.peer_added
return await peer_mgr.on_add_peer(features, self.peer_info())
def peers_subscribe(self):
'''Return the server peers as a list of (ip, host, details) tuples.'''
@ -268,22 +263,23 @@ class ElectrumX(SessionBase):
index: the chunk index'''
index = self.controller.non_negative_integer(index)
self.chunk_indices.append(index)
self.chunk_indices = self.chunk_indices[-5:]
# -2 allows backing up a single chunk but no more.
if index <= max(self.chunk_indices[:-2], default=-1):
msg = ('chunk indices not advancing (wrong network?): {}'
.format(self.chunk_indices))
# INVALID_REQUEST triggers a disconnect
raise RPCError(msg, JSONRPC.INVALID_REQUEST)
if self.client_version < (2, 8, 3):
self.chunk_indices.append(index)
self.chunk_indices = self.chunk_indices[-5:]
# -2 allows backing up a single chunk but no more.
if index <= max(self.chunk_indices[:-2], default=-1):
msg = ('chunk indices not advancing (wrong network?): {}'
.format(self.chunk_indices))
# use INVALID_REQUEST to trigger a disconnect
raise RPCError(msg, JSONRPC.INVALID_REQUEST)
return self.controller.get_chunk(index)
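The guard above amounts to a five-entry sliding window over requested chunk indices; a hypothetical standalone version for clarity:

def indices_advancing(chunk_indices, index):
    # Remember only the last five requested indices
    chunk_indices.append(index)
    del chunk_indices[:-5]
    # Comparing against all but the last two entries permits backing
    # up by a single chunk, but no more
    return index > max(chunk_indices[:-2], default=-1)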
def is_tor(self):
'''Try to detect if the connection is to a tor hidden service we are
running.'''
tor_proxy = self.controller.peer_mgr.tor_proxy
proxy = self.controller.peer_mgr.proxy
peer_info = self.peer_info()
return peer_info and peer_info[0] == tor_proxy.ip_addr
return peer_info and peer_info[0] == proxy.ip_addr
async def replaced_banner(self, banner):
network_info = await self.controller.daemon_request('getnetworkinfo')
@ -292,8 +288,11 @@ class ElectrumX(SessionBase):
minor, revision = divmod(minor, 10000)
revision //= 100
daemon_version = '{:d}.{:d}.{:d}'.format(major, minor, revision)
server_version = version.VERSION.split()[-1]
for pair in [
('$VERSION', version.VERSION),
('$VERSION', version.VERSION), # legacy
('$SERVER_VERSION', server_version),
('$SERVER_SUBVERSION', version.VERSION),
('$DAEMON_VERSION', daemon_version),
('$DAEMON_SUBVERSION', network_info['subversion']),
('$DONATION_ADDRESS', self.env.donation_address),
@ -329,6 +328,11 @@ class ElectrumX(SessionBase):
'''
if client_name:
self.client = str(client_name)[:17]
try:
self.client_version = tuple(int(part) for part
in self.client.split('.'))
except Exception:
pass
if protocol_version is not None:
self.protocol_version = protocol_version
return version.VERSION
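The tolerant version parsing added above can be summarised in a small hypothetical helper: dotted numeric strings become integer tuples that compare lexicographically, and unparseable client names keep the (1,) default:

def parse_client_version(client_name, default=(1,)):
    # '2.8.3' -> (2, 8, 3); non-numeric names fall back to the default
    try:
        return tuple(int(part) for part in client_name.split('.'))
    except ValueError:
        return default

assert parse_client_version('2.8.3') < parse_client_version('2.10.1')
assert parse_client_version('Electrum') == (1,)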
@ -383,3 +387,63 @@ class LocalRPC(SessionBase):
def request_handler(self, method):
'''Return the async handler for the given request method.'''
return self.controller.rpc_handlers.get(method)
class DashElectrumX(ElectrumX):
'''A TCP server that handles incoming Electrum Dash connections.'''
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.electrumx_handlers['masternode.announce.broadcast'] = self.masternode_announce_broadcast
self.electrumx_handlers['masternode.subscribe'] = self.masternode_subscribe
self.mns = set()
async def notify(self, height, touched):
'''Notify the client about changes in masternode list.'''
await super().notify(height, touched)
for masternode in self.mns:
status = await self.daemon.masternode_list(['status', masternode])
payload = {
'id': None,
'method': 'masternode.subscribe',
'params': [masternode],
'result': status.get(masternode),
}
self.send_binary(self.encode_payload(payload))
def server_version(self, client_name=None, protocol_version=None):
'''Returns the server version as a string.
Force a '1.0' version string response for the Electrum-Dash 2.6.4
client, whose version handling was broken by
https://github.com/dashpay/electrum-dash/commit/638cf6c0aeb7be14a85ad98f873791cb7b49ee29
'''
default_return = super().server_version(client_name, protocol_version)
if self.client == '2.6.4':
return '1.0'
return default_return
# Masternode command handlers
async def masternode_announce_broadcast(self, signmnb):
'''Pass through the masternode announce message to be broadcast by the daemon.'''
try:
mnb_info = await self.daemon.masternode_broadcast(['relay', signmnb])
return mnb_info
except DaemonError as e:
error = e.args[0]
message = error['message']
self.log_info('masternode_broadcast: {}'.format(message))
return (
'The masternode broadcast was rejected. ({})\n[{}]'
.format(message, signmnb)
)
async def masternode_subscribe(self, vin):
'''Returns the status of masternode.'''
result = await self.daemon.masternode_list(['status', vin])
if result is not None:
self.mns.add(vin)
return result.get(vin)
return None

2
server/version.py

@ -1,5 +1,5 @@
# Server name and protocol versions
VERSION = 'ElectrumX 1.0'
VERSION = 'ElectrumX 1.0.13'
PROTOCOL_MIN = '1.0'
PROTOCOL_MAX = '1.0'

17
tests/blocks/bitcoin_mainnet_100000.json

@ -0,0 +1,17 @@
{
"hash": "000000000003ba27aa200b1cecaad478d2b00432346c3f1f3986da1afd33e506",
"size": 957,
"height": 100000,
"merkleroot": "f3e94742aca4b5ef85488dc37c06c3282295ffec960994b2c0d5ac2a25a95766",
"tx": [
"8c14f0db3df150123e6f3dbbf30f8b955a8249b62ac1d1ff16284aefa3d06d87",
"fff2525b8931402dd09222c50775608f75787bd2b87e56995a7bdd30f79702c4",
"6359f0868171b1d194cbee1af2f16ea598ae8fad666d9b012c8ed2b79a236ec4",
"e9a66845e05d5abc0ad04ec80f774a7e585c6e8db975962d069a522137b80c1d"
],
"time": 1293623863,
"nonce": 274148111,
"bits": "1b04864c",
"previousblockhash": "000000000002d01c1fccc21636b607dfd930d31d01c3a62104612a1719011250",
"block": "0100000050120119172a610421a6c3011dd330d9df07b63616c2cc1f1cd00200000000006657a9252aacd5c0b2940996ecff952228c3067cc38d4885efb5a4ac4247e9f337221b4d4c86041b0f2b57100401000000010000000000000000000000000000000000000000000000000000000000000000ffffffff08044c86041b020602ffffffff0100f2052a010000004341041b0e8c2567c12536aa13357b79a073dc4444acb83c4ec7a0e2f99dd7457516c5817242da796924ca4e99947d087fedf9ce467cb9f7c6287078f801df276fdf84ac000000000100000001032e38e9c0a84c6046d687d10556dcacc41d275ec55fc00779ac88fdf357a187000000008c493046022100c352d3dd993a981beba4a63ad15c209275ca9470abfcd57da93b58e4eb5dce82022100840792bc1f456062819f15d33ee7055cf7b5ee1af1ebcc6028d9cdb1c3af7748014104f46db5e9d61a9dc27b8d64ad23e7383a4e6ca164593c2527c038c0857eb67ee8e825dca65046b82c9331586c82e0fd1f633f25f87c161bc6f8a630121df2b3d3ffffffff0200e32321000000001976a914c398efa9c392ba6013c5e04ee729755ef7f58b3288ac000fe208010000001976a914948c765a6914d43f2a7ac177da2c2f6b52de3d7c88ac000000000100000001c33ebff2a709f13d9f9a7569ab16a32786af7d7e2de09265e41c61d078294ecf010000008a4730440220032d30df5ee6f57fa46cddb5eb8d0d9fe8de6b342d27942ae90a3231e0ba333e02203deee8060fdc70230a7f5b4ad7d7bc3e628cbe219a886b84269eaeb81e26b4fe014104ae31c31bf91278d99b8377a35bbce5b27d9fff15456839e919453fc7b3f721f0ba403ff96c9deeb680e5fd341c0fc3a7b90da4631ee39560639db462e9cb850fffffffff0240420f00000000001976a914b0dcbf97eabf4404e31d952477ce822dadbe7e1088acc060d211000000001976a9146b1281eec25ab4e1e0793ff4e08ab1abb3409cd988ac0000000001000000010b6072b386d4a773235237f64c1126ac3b240c84b917a3909ba1c43ded5f51f4000000008c493046022100bb1ad26df930a51cce110cf44f7a48c3c561fd977500b1ae5d6b6fd13d0b3f4a022100c5b42951acedff14abba2736fd574bdb465f3e6f8da12e2c5303954aca7f78f3014104a7135bfe824c97ecc01ec7d7e336185c81e2aa2c41ab175407c09484ce9694b44953fcb751206564a9c24dd094d42fdbfdd5aad3e063ce6af4cfaaea4ea14fbbffffffff0140420f00000000001976a91439aa3d569e06a1d7926dc4be1193c99bf2eb9ee088ac00000000"
}

19
tests/blocks/digibyte_mainnet_4394891.json

File diff suppressed because one or more lines are too long

19
tests/blocks/dogecoin_mainnet_371337.json

@ -0,0 +1,19 @@
{
"hash": "60323982f9c5ff1b5a954eac9dc1269352835f47c2c5222691d80f0d50dcf053",
"size": 1704,
"height": 371337,
"merkleroot": "ee27b8fb782a5bfb99c975f0d4686440b9af9e16846603e5f2830e0b6fbf158a",
"tx": [
"4547b14bc16db4184fa9f141d645627430dd3dfa662d0e6f418fba497091da75",
"a965dba2ed06827ed9a24f0568ec05b73c431bc7f0fb6913b144e62db7faa519",
"5e3ab18cb7ba3abc44e62fb3a43d4c8168d00cf0a2e0f8dbeb2636bb9a212d12",
"f022935ac7c4c734bd2c9c6a780f8e7280352de8bd358d760d0645b7fe734a93",
"ec063cc8025f9f30a6ed40fc8b1fe63b0cbd2ea2c62664eb26b365e6243828ca",
"02c16e3389320da3e77686d39773dda65a1ecdf98a2ef9cfb938c9f4b58f7a40"
],
"time": 1410464577,
"nonce": 0,
"bits": "1b364184",
"previousblockhash": "46a8b109fb016fa41abd17a19186ca78d39c60c020c71fcd2690320d47036f0d",
"block": "020162000d6f03470d329026cd1fc720c0609cd378ca8691a117bd1aa46f01fb09b1a8468a15bf6f0b0e83f2e5036684169eafb9406468d4f075c999fb5b2a78fbb827ee41fb11548441361b0000000001000000010000000000000000000000000000000000000000000000000000000000000000ffffffff380345bf09fabe6d6d980ba42120410de0554d42a5b5ee58167bcd86bf7591f429005f24da45fb51cf0800000000000000cdb1f1ff0e000000ffffffff01800c0c2a010000001976a914aa3750aa18b8a0f3f0590731e1fab934856680cf88ac00000000b3e64e02fff596209c498f1b18f798d62f216f11c8462bf3922319000000000003a979a636db2450363972d211aee67b71387a3daaa3051be0fd260c5acd4739cd52a418d29d8a0e56c8714c95a0dc24e1c9624480ec497fe2441941f3fee8f9481a3370c334178415c83d1d0c2deeec727c2330617a47691fc5e79203669312d100000000036fa40307b3a439538195245b0de56a2c1db6ba3a64f8bdd2071d00bc48c841b5e77b98e5c7d6f06f92dec5cf6d61277ecb9a0342406f49f34c51ee8ce4abd678038129485de14238bd1ca12cd2de12ff0e383aee542d90437cd664ce139446a00000000002000000d2ec7dfeb7e8f43fe77aba3368df95ac2088034420402730ee0492a2084217083411b3fc91033bfdeea339bc11b9efc986e161c703e07a9045338c165673f09940fb11548b54021b58cc9ae50601000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0d0389aa050101062f503253482fffffffff010066f33caf050000232102b73438165461b826b30a46078f211aa005d1e7e430b1e0ed461678a5fe516c73ac000000000100000001ef2e86aa5f027e13d7fc1f0bd4a1fc677d698e42850680634ccd1834668ff320010000006b483045022100fcf5dc43afa85978a71e76a9f4c11cd6bf2a7d5677212f9001ad085d420a5d3a022068982e1e53e94fc6007cf8b60ff3919bcaf7f0b70fefb79112cb840777d8c7cf0121022b050b740dd02c1b4e1e7cdbffe6d836d987c9db4c4db734b58526f08942193bffffffff02004e7253000000001976a91435cb1f77e88e96fb3094d84e8d3b7789a092636d88ac00d4b7e8b00700001976a9146ca1f634daa4efc7871abab945c7cefd282b481f88ac0000000001000000010a6c24bbc92fd0ec32bb5b0a051c44eba0c1325f0b24d9523c109f8bb1281f49000000006a4730440220608577619fb3a0b826f09df5663ffbf121c8e0164f43b73d9affe2f9e4576bd0022040782c9a7df0a20afe1a7e3578bf27e1331c862253af21ced4fde5ef1b44b787012103e4f91ad831a87cc532249944bc7138a355f7d0aac25dc4737a8701181ce680a5ffffffff010019813f0d0000001976a91481db1aa49ebc6a71cad96949eb28e22af85eb0bd88ac0000000001000000017b82db0f644ecff378217d9b8dc0de8817eaf85ceefacab23bf344e2e495dca5010000006b483045022100f07ced6bfdbd6cdeb8b2c8fc92b9803f5798754b5b6c454c8f084198bea303f402205616f84d7ec882af9c34a3fd2457ca3fb81ec5a463a963a6e684edee427d4525012102c056b10494520dbd7b37e2e6bb8f72f98d73a609a926901221bfb114fa1d5a80ffffffff02f0501a22000000001976a914ca63ded8b23d0252158a3bdc816747ef89fb438988ac80b65ea1350700001976a914fb26a7c16ace531a8e7bbd925e46c67c3150c1c888ac000000000100000001c9bdba900e1579ebf4e44415fe8b9abec57a763f8c70a30604bea7fbe7c55d42000000006a47304402204ccbeeace0630e72102fdaf0836e41f8f6dcdde6a178f0fbc2d96a4d17a1df8f02207e4a91203a2abd87fdddee96510482ef96535741b6c17a1acae93c977ad248e5012103e0747583a342b76a5de9c21db138b9640d49b4f3b67a306d3b3f217416d49b55ffffffff020058850c020000001976a9144417c63a91208a02a5f46a0f7a2b806adc7d19a788ac0042dc06030000001976a9147b61c5adef0d559e5acf2901c2989294624b651988ac0000000001000000017c1423b198dfc3da37ae9a5fc11a3720e4343b3049d3b289b8285eb04595c04b000000006b483045022100b0c1cb9608bf644d7a8916bf61f36ced95bd045e97612804ca774f60e05e7bde022017c12255eecc474c8d8b05d0910013b2df8703af68212cf0962b6b8ee0e101ee01210341e154088c23b8ea943bca94c1d4f65361668a242b168522f00199365414b46affffffff01019891ad000000001976a91481db1aa49ebc6a71cad96949eb28e22af85eb0bd88ac00000000"
}

18
tests/blocks/litecoin_mainnet_900000.json

@ -0,0 +1,18 @@
{
"hash": "545127eacc261629ae25ada99c7aadc1a929aed2da32f95ef866333f37c11e49",
"size": 1132,
"height": 900000,
"merkleroot": "11929e3e325f6346e9d24c0373dafbafcaaa7837aa862f33b7c529d457ca1229",
"tx": [
"ad21fe3e94fd3da9a0920ed2fd112f7c805ac1b80274f4d999da3d2a5c6bd733",
"ea3b27388e968c413ef6af47be2843d649979e9b721331f593287b8d486be230",
"3b6b555a86471c5e5ee3d07838df04a6802f83b6f37c79922b86ef1983262d5e",
"026f93ffe84775b6c42b660944d25f7224c31b1175db837b664db32cd42e2300",
"7c274e298aa6feae7a0590dffca92d31b1f5f3697b26c6ceb477efc43f0afe39"
],
"time": 1449526456,
"nonce": 685998084,
"bits": "1b014ec5",
"previousblockhash": "93819e801bbdaec2698e3dda35e12be0a0004759c635924fda7f007a358848be",
"block": "03000000be4888357a007fda4f9235c6594700a0e02be135da3d8e69c2aebd1b809e81932912ca57d429c5b7332f86aa3778aacaaffbda73034cd2e946635f323e9e9211b8046656c54e011b0480e3280501000000010000000000000000000000000000000000000000000000000000000000000000ffffffff6403a0bb0de4b883e5bda9e7a59ee4bb99e9b1bcfabe6d6d227c509f30b3ac49948323ce5974f89f6261ed913a701fc137bc08ead15179b940000000f09f909f4d696e6564206279206c6a6a38380000000000000000000000000000000000000000000000510000000176fa0795000000001976a914aa3750aa18b8a0f3f0590731e1fab934856680cf88ac5aa5893301000000015992d44c8d8790727c91055ce305e115373ff7fe32d632edc3f9939914b7d810000000006a47304402203ea789e265999b19b2155e4eb6135a50773d45836e1abb00a4959126c323e25d02207db24c9069683a6e4fc850a700717da08bf2c3ea80e8f3ee1ac75c1b702198800121033567eb9b5281b320bd8f20718b205e1808e7c0432d41991bdfad3eb5b53c49f9ffffffff02427e8837000000001976a914bc8d35412e239d91f9c95548afa15e22f094be3688ac0027b929000000001976a914b5e82238517f926b14467fbf8f90812b0eec8e5288ac000000000100000001ad3d610da30df966af2407b45bf0236a782f1e4444b829bf59da1679ceb16733000000006a47304402206b32468586635a1965fbb1c186799f1ccfce13549bd098845b97e75ea8bff473022021f35faf6e67428d51e58ed1895f9db2d40337d04e1b8819154c2bc71b0446af012102a740669302896fc4bdba32a951a67f95b3369fbc2ac97f1fda559999866d623bffffffff0245781300000000001976a9146f67216770c0af807e0597896a8c8ec306994e7b88ac80841e00000000001976a914b5e8223ec1e89b386cb5beb1c30cf165ac84e46388ac000000000100000001520f304eec49a1a9eeb0682da600b436a8dd43efc97ff4ed6ac2bcf0912e5caa000000006a473044022040218475e180db66cf71aa56668145b4f4d4d0a93b0e3777985039d87a53f881022047aaef5b4e262365c2dd2d7e1cbdf3016ff22468faef6104e4397540c199dfc6012103418a46f4534e7ec8a98146da6431550c370069777cacfdfbccc7a01f31abd1d0ffffffff02505bd425000000001976a9149f74e62f0f92663525050b56ad8b180048b4e80488ac408d1c1b000000001976a9149f7044d46304c187dc08d05864aeccb5a044e45588ac00000000010000000139c9bb7efca3fdd77ae18adf87614827d1c0bb1803a0d50ae42342e524ca99b7000000006a47304402205b75fd27c33c89346bc778d1369549b27f41ed0ded4947a19fb2884363a8ee7502206672bb1bd4e4a2a89cba62d1c5a93e1a6ae042f379e57380aebf14a693b42bea0121024f5b70c3309c77762c1b487f804c9666f5302545d7555d1808b63fdc9c17f840ffffffff01f3247d00000000001976a9149a20d4f533a7d7670cf14c77107dfd1eefddbd5388ac00000000"
}

14
tests/blocks/namecoin_mainnet_19200.json

@ -0,0 +1,14 @@
{
"hash": "d8a7c3e01e1e95bcee015e6fcc7583a2ca60b79e5a3aa0a171eddd344ada903d",
"size": 678,
"height": 19200,
"merkleroot": "88afdfdcc78f778f701835b62e432d3ba7d55b3e59ac4e7cab08d6bc49655c0f",
"tx": [
"88afdfdcc78f778f701835b62e432d3ba7d55b3e59ac4e7cab08d6bc49655c0f"
],
"time": 1318066829,
"nonce": 0,
"bits": "1b00b269",
"previousblockhash": "000000000000b19f0ad5cd46859fe8c9662e8828d8a75ff6da73167ac09a9036",
"block": "0101010036909ac07a1673daf65fa7d828882e66c9e89f8546cdd50a9fb10000000000000f5c6549bcd608ab7c4eac593e5bd5a73b2d432eb63518708f778fc7dcdfaf888d1a904e69b2001b0000000001000000010000000000000000000000000000000000000000000000000000000000000000ffffffff35045dee091a014d522cfabe6d6dd8a7c3e01e1e95bcee015e6fcc7583a2ca60b79e5a3aa0a171eddd344ada903d0100000000000000ffffffff0160a0102a01000000434104f8bbe97ed2acbc5bba11c68f6f1a0313f918f3d3c0e8475055e351e3bf442f8c8dcee682d2457bdc5351b70dd9e34026766eba18b06eaee2e102efd1ab634667ac00000000a903ef9de1918e4b44f6176a30c0e7c7e3439c96fb597327473d00000000000005050ac4a1a1e1bce0c48e555b1a9f935281968c72d6379b24729ca0425a3fc3cb433cd348b35ea22806cf21c7b146489aef6989551eb5ad2373ab6121060f30341d648757c0217d43e66c57eaed64fc1820ec65d157f33b741965183a5e0c8506ac2602dfe2f547012d1cc75004d48f97aba46bd9930ff285c9f276f5bd09f356df19724579d65ec7cb62bf97946dfc6fb0e3b2839b7fdab37cdb60e55122d35b0000000000000000000100000008be13295c03e67cb70d00dae81ea06e78b9014e5ceb7d9ba504000000000000e0fd42db8ef6d783f079d126bea12e2d10c104c0927cd68f954d856f9e8111e59a23904e5dee091a1c6550860101000000010000000000000000000000000000000000000000000000000000000000000000ffffffff080469b2001b010152ffffffff0100f2052a0100000043410489fe91e62847575c98deeab020f65fdff17a3a870ebb05820b414f3d8097218ec9a65f1e0ae0ac35af7247bd79ed1f2a24675fffb5aa6f9620e1920ad4bf5aa6ac00000000"
}

15
tests/blocks/namecoin_mainnet_19204.json

@ -0,0 +1,15 @@
{
"hash": "000000000000122ff239e71146bf57aee28ad913931d672cd124255e91351660",
"size": 475,
"height": 19204,
"merkleroot": "45d5bc5330dad21dd4dcf0daadefef4ba826fe81e2dd2fa38a4a49ea06c97b1d",
"tx": [
"7752b6a596641bd90ae71d1bc054f3dd1ad36ce3fe7e7d3292ff8594feafb8a9",
"499dad7cd9e737c9f2f10bc4b3930b566d82288a8c02b68a50d2cf2694868bdd"
],
"time": 1318073606,
"nonce": 3373003561,
"bits": "1b00b269",
"previousblockhash": "07d6d85d2f33fae0b52d84a90757d1600fdb3f2cf2f31f2a32eef59172245af6",
"block": "01000100f65a247291f5ee322a1ff3f22c3fdb0f60d15707a9842db5e0fa332f5dd8d6071d7bc906ea494a8aa32fdde281fe26a84befefaddaf0dcd41dd2da3053bcd5450635904e69b2001b29f30bc90201000000010000000000000000000000000000000000000000000000000000000000000000ffffffff080469b2001b027829ffffffff0100f2052a0100000043410439cf5bc2e4b0d555178b3d19fa82d59aa998cc082086f874928af6e70c1093b300b6371d093ac9d41393e11907ed17d2489405e220a6f08feeecbce9f6cfcc0bac00000000010000000148efa1ba7512bbd538033b798da1064e724e21739f6bd8ea0c28e3d0d53136d6010000008c49304602210095cf1a495623ed7794746b7b0f2daa70a9783f635e24991487e8a6869b553c9b0221009dec7919115c3a84f03236c8aea6e175e8dd9ee3a6daa1f6c56ac1d6246ec5da014104d483cffe3907aefdb9a20dab73dd4c83f6d14d26bd9d21aeccd33b0be2068e4832fea66110606198728413ad88a6dd642bdd6ff72aefd79732a2375c3129f1fcffffffff0220ab6136000000001976a9141b5a80636dfa8c4e78c1d1150a2ba961d704911388ac0065cd1d000000001976a9143f47c122f3a71e70cb3a4c9d215e5ce7b740b96a88ac00000000"
}

15
tests/blocks/reddcoin_mainnet_1200000.json

@ -0,0 +1,15 @@
{
"hash": "bea68724bfcdc5d35bf9bbf9cb6680e196e5661afe95b2a205e74a2fe175ac79",
"size": 443,
"height": 1200000,
"merkleroot": "504b073c16d872d24f2c3de8a4c2c76d08df5056f3a4a8d0e32ff4220215a250",
"tx": [
"6aaad9725ae7beb40d80dac9c5300a8e1cf8783adb0ea41da4988b3476bda9b8",
"4a949402995c11b3306c0c91fd85edf0d3eb8dee4bf6bd07a241fa170156cd3c"
],
"time": 1463612841,
"nonce": 0,
"bits": "1c0a4691",
"previousblockhash": "438b564171da6fbbe6fd9d52c16ea2b1aa8c169951822225cf097d5da7cdba76",
"block": "0300000076bacda75d7d09cf2522825199168caab1a26ec1529dfde6bb6fda7141568b4350a2150222f42fe3d0a8a4f35650df086dc7c2a4e83d2c4fd272d8163c074b50a9f53c5791460a1c000000000202000000010000000000000000000000000000000000000000000000000000000000000000ffffffff020000ffffffff0100000000000000000000000000a9f53c570200000001a40cad8a9afe2888f746d762cb36649b5afd4e8ce4468fd8d08fc296d26dc4840100000048473044022036392ee6eb58c5a9a2a681692cabdc2b00166c374cfb711055bc2c4d6c61a1d40220475728eed260bf972ef44909f0d6fa282f17e92b5e57ee383c7171e8a3baee1f01ffffffff030000000000000000000056b12a38720000232102bee8ce24a99260fbb6c10f0b904498fa71ec08e51b531878d3f6568ef09acb91ac0ad6b22a38720000232102bee8ce24a99260fbb6c10f0b904498fa71ec08e51b531878d3f6568ef09acb91ac00000000a9f53c57473045022100fe801bae06c9db3076fad2f72930f76dbe1cae29a162447b13d0df749e5913df02203621013f87da4dbca08702d8c7975f702bad9df40902038b93e622a0dd9c0896"
}

20
tests/blocks/reddcoin_mainnet_8000.json

@ -0,0 +1,20 @@
{
"hash": "4889bb7d1ba24cc66c2d903f6643b0ade243aca5101a8aff87ab4c2ab2a15ec5",
"size": 1560,
"height": 80000,
"merkleroot": "193313cfa4d8a4bc15fb5526b69a87c922e0f6520295f66165358f0af6b5d637",
"tx": [
"ad01e368a301b855d5f4499bc787b161428d6994c4847c0b2813950630a73950",
"1799481d7fed61c029159d314f75f3d6f69a7f8c237443470394085307802782",
"8db4b2c62fca987462c482d24ce0b78d2a3dd3928d5d99112ccad75deb6ff7de",
"ab0a1e66e54c737be6ea2be2c61cd55879d33c0fc5d35aa6389487e06c809cfc",
"1bb3854ed7fe9905b5637d405cd0715e5cb6f5fe233304a1588c53bdcf60f593",
"08d3ccf77f30e62d8773669adea730698806516239933ac7c4285bcacdb37989",
"19cbdc4acfb07dc29c73f039d8f5db967ce30c0667fda60babc700a7c53c0b5f"
],
"time": 1396181239,
"nonce": 1368399360,
"bits": "1c0cc111",
"previousblockhash": "e34cfbf84095c64276ee098de50c3a1f863df9336968e9fb8a973bdd52e3ed04",
"block": "0200000004ede352dd3b978afbe9686933f93d861f3a0ce58d09ee7642c69540f8fb4ce337d6b5f60a8f356561f6950252f6e022c9879ab62655fb15bca4d8a4cf133319f708385311c10c1c001e90510701000000010000000000000000000000000000000000000000000000000000000000000000ffffffff2703803801062f503253482f04f608385308f800f159010000000d2f7374726174756d506f6f6c2f000000000100e63572180900001976a9148d41bc27ab2cc999338750edd4b0012bdb36f70288ac0000000001000000014b1a8085e29ca8ef2bf1de5f0637c6ef597b67223a087458e51e21168a0e44a3000000006b48304502200b781c255481e90f0e1d2fedbc1ffb42562434c324566444da8718a8a2c5182d022100f50faa7a9f7b90b4b805050c9731a79fb9c599ddfb3d84449d0cff7ee216bf59012103d7ab8ea88d09589410bdb93cd466d92f56985a3cff6d74dce3f033500135f0c5ffffffff02d72ea96581330e001976a91422758e8f790ea0e4ab87ad3990e8af86c77375c088ac1c1cab70190000001976a91434e880ed4cb32ebb1e0842b4f05efe562724f08788ac000000000100000001616c5b1a7ee823fa2d5347011b34e1ea027f9494823d37fb175eece8f852f987000000006a473044022000be9cf6677d879d170c597b8a465137577119ebc7d01773dc13df7af7e0bf1102202acfce90f478c0d179ab98d708f1e24f6dab4fe60c75893f8bad12991b30f41301210355dad820f63f1c315dc16c5afd9782e4d0b225ea29320a85576bc2c82fde6e7effffffff02ceb618fa97ac10001976a914e14548bfd2e14e0cabaf535c7c80a227238b35e188ac1c1cab70190000001976a914d2046a1ad1dbc32e69dae4da0a8730379105936e88ac000000000100000001a6b3081431b43c3247df88b3b6d123d2f2d7ba2095c6ef4f6532feb2c45f9210010000006b4830450221008fb902cc4130bae26439c47c13467a7d8a8c52ac2d88a200548f1e8f8b100b910220125b45cee0765389a59d4cca65482bdf79d3bc8fdaa5a0142e7829e4a2568124012103cdece1576249c8e05fb0aa2cbe61aa959330ff2f9e3c5cd2e5152e90650d9386ffffffff02bbba56d0d88606001976a91407499b20688a0b61b4a526681647de739dab818e88ac1c1cab70190000001976a9147085556af12556138277188e3958a869eeced02088ac000000000100000001fc9c806ce0879438a65ad3c50f3cd37958d51cc6e22beae67b734ce5661e0aab000000006c493046022100dca959b02a4dde588b3e5c3e71877797b97d7094a82cdd6b6b52c3d04a8c17c3022100938b2f70eed007d20ef9d7d055fc9b8785e71e3f0981558503fb3635b08aa6d40121039d216b71bad34246ceff262afe6df520761fc696fd9862c3f2f7e337ad93d881ffffffff0202386cc4f57e06001976a914ee343e816e6782262c3f6b1b9ec8f8c17d47a88c88acb9a1f405e30700001976a914ba81e33df7ba3d18728c6c206f8ad0b30b83b71988ac00000000010000000193f560cfbd538c58a1043323fef5b65c5e71d05c407d63b50599fed74e85b31b000000006a4730440220153f0a0a16e13943c4869e8f768c64e9f1844d14823f80878a6e44752a041c49022036ec13a307bafee74387048c3772cfb5ebdc138d70d6b4c256788a86db93ab5801210281232e155b37ebd64759ee4983962e9f8ccfd95e302d828de1406549e7c327a4ffffffff029014fad0166506001976a914b05959ea5dd831fd082488298466c9307a46f55b88ac72427cedde1900001976a914c2e3e90990f452c19ccef5df1cc3711c2e5d448288ac0000000001000000018979b3cdca5b28c4c73a9339625106886930a7de9a6673872de6307ff7ccd308000000006b483045022100ec50258bfec642e6c986192f338b7a1eec84c872d9b51ccc6f1c7329da20af77022047a6836d7c5f416c2eef6ef59fae9cc627ff80882897fe3eabd775e2a4a08533012102240bb70ae679cb25d60e2e0f90f98017eac7b6abbf1e00797ef930f02f0b98eeffffffff029e75ab66cd6306001976a9144c9ef3b178febefc62a0067e67e8434afe864a6788acf2bd5864490100001976a9143cde6d950e730b199c5857564afe7f222e139ead88ac00000000"
}

15
tests/blocks/zcash_mainnet_1000.json

File diff suppressed because one or more lines are too long

10
tests/test_addresses.py → tests/lib/test_addresses.py

@ -26,7 +26,7 @@
import pytest
from lib.coins import Litecoin, Bitcoin
from lib.coins import Litecoin, Bitcoin, Zcash
from lib.hash import Base58
addresses = [
@ -36,7 +36,11 @@ addresses = [
"a773db925b09add367dcc253c1f9bbc1d11ec6fd", "062d8515e50cb92b8a3a73"),
(Litecoin, "LNBAaWuZmipg29WXfz5dtAm1pjo8FEH8yg",
"206168f5322583ff37f8e55665a4789ae8963532", "b8cb80b26e8932f5b12a7e"),
(Litecoin, "3GxRZWkJufR5XA8hnNJgQ2gkASSheoBcmW",
(Litecoin, "MPAZsQAGrnGWKfQbtFJ2Dfw9V939e7D3E2",
"a773db925b09add367dcc253c1f9bbc1d11ec6fd", "062d8515e50cb92b8a3a73"),
(Zcash, "t1LppKe1sfPNDMysGSGuTjxoAsBcvvSYv5j",
"206168f5322583ff37f8e55665a4789ae8963532", "b8cb80b26e8932f5b12a7e"),
(Zcash, "t3Zq2ZrASszCg7oBbio7oXqnfR6dnSWqo76",
"a773db925b09add367dcc253c1f9bbc1d11ec6fd", "062d8515e50cb92b8a3a73"),
]
@ -60,7 +64,7 @@ def test_address_from_hash160(address):
verbyte, hash_bytes = raw[:verlen], raw[verlen:]
if coin.P2PKH_VERBYTE == verbyte:
assert coin.P2PKH_address_from_hash160(bytes.fromhex(hash)) == addr
elif coin.P2SH_VERBYTE == verbyte:
elif verbyte in coin.P2SH_VERBYTES:
assert coin.P2SH_address_from_hash160(bytes.fromhex(hash)) == addr
else:
raise Exception("Unknown version byte")

68
tests/lib/test_hash.py

@ -0,0 +1,68 @@
#
# Tests of lib/hash.py
#
import pytest
import lib.hash as lib_hash
def test_sha256():
assert lib_hash.sha256(b'sha256') == b'][\t\xf6\xdc\xb2\xd5:_\xff\xc6\x0cJ\xc0\xd5_\xab\xdfU`i\xd6c\x15E\xf4*\xa6\xe3P\x0f.'
with pytest.raises(TypeError):
lib_hash.sha256('sha256')
def test_ripemd160():
assert lib_hash.ripemd160(b'ripemd160') == b'\x903\x91\xa1\xc0I\x9e\xc8\xdf\xb5\x1aSK\xa5VW\xf9|W\xd5'
with pytest.raises(TypeError):
lib_hash.ripemd160('ripemd160')
def test_double_sha256():
assert lib_hash.double_sha256(b'double_sha256') == b'ksn\x8e\xb7\xb9\x0f\xf6\xd9\xad\x88\xd9#\xa1\xbcU(j1Bx\xce\xd5;s\xectL\xe7\xc5\xb4\x00'
def test_hmac_sha512():
assert lib_hash.hmac_sha512(b'key', b'message') == b"\xe4w8M|\xa2)\xdd\x14&\xe6Kc\xeb\xf2\xd3n\xbdm~f\x9ag5BNr\xeal\x01\xd3\xf8\xb5n\xb3\x9c6\xd8#/T'\x99\x9b\x8d\x1a?\x9c\xd1\x12\x8f\xc6\x9fMu\xb44!h\x10\xfa6~\x98"
def test_hash160():
assert lib_hash.hash160(b'hash_160') == b'\xb3\x96\x94\xfc\x978R\xa7)XqY\xbb\xdc\xeb\xac\xa7%\xb8$'
def test_hash_to_hex_str():
assert lib_hash.hash_to_hex_str(b'hash_to_str') == '7274735f6f745f68736168'
def test_hex_str_to_hash():
assert lib_hash.hex_str_to_hash('7274735f6f745f68736168') == b'hash_to_str'
def test_Base58_char_value():
chars = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
for value, c in enumerate(chars):
assert lib_hash.Base58.char_value(c) == value
for c in (' ', 'I', '0', 'l', 'O'):
with pytest.raises(lib_hash.Base58Error):
lib_hash.Base58.char_value(c)
def test_Base58_decode():
with pytest.raises(TypeError):
lib_hash.Base58.decode(b'foo')
with pytest.raises(lib_hash.Base58Error):
lib_hash.Base58.decode('')
assert lib_hash.Base58.decode('123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz') == b'\x00\x01\x11\xd3\x8e_\xc9\x07\x1f\xfc\xd2\x0bJv<\xc9\xaeO%+\xb4\xe4\x8f\xd6j\x83^%*\xda\x93\xffH\rm\xd4=\xc6*d\x11U\xa5'
assert lib_hash.Base58.decode('3i37NcgooY8f1S') == b'0123456789'
def test_Base58_encode():
with pytest.raises(TypeError):
lib_hash.Base58.encode('foo')
assert lib_hash.Base58.encode(b'') == ''
assert lib_hash.Base58.encode(b'\0') == '1'
assert lib_hash.Base58.encode(b'0123456789') == '3i37NcgooY8f1S'
def test_Base58_decode_check():
with pytest.raises(TypeError):
lib_hash.Base58.decode_check(b'foo')
assert lib_hash.Base58.decode_check('4t9WKfuAB8') == b'foo'
with pytest.raises(lib_hash.Base58Error):
lib_hash.Base58.decode_check('4t9WKfuAB9')
def test_Base58_encode_check():
with pytest.raises(TypeError):
lib_hash.Base58.encode_check('foo')
assert lib_hash.Base58.encode_check(b'foo') == '4t9WKfuAB8'

27
tests/test_util.py → tests/lib/test_util.py

@ -56,3 +56,30 @@ def test_increment_byte_string():
assert util.increment_byte_string(b'1') == b'2'
assert util.increment_byte_string(b'\x01\x01') == b'\x01\x02'
assert util.increment_byte_string(b'\xff\xff') is None
def test_is_valid_hostname():
is_valid_hostname = util.is_valid_hostname
assert not is_valid_hostname('')
assert is_valid_hostname('a')
assert is_valid_hostname('_')
# Hyphens
assert not is_valid_hostname('-b')
assert not is_valid_hostname('a.-b')
assert is_valid_hostname('a-b')
assert not is_valid_hostname('b-')
assert not is_valid_hostname('b-.c')
# Dots
assert is_valid_hostname('a.')
assert is_valid_hostname('foo1.Foo')
assert not is_valid_hostname('foo1..Foo')
assert is_valid_hostname('12Foo.Bar.Bax_')
assert is_valid_hostname('12Foo.Bar.Baz_12')
# 63 octets in part
assert is_valid_hostname('a.abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN'
'OPQRSTUVWXYZ0123456789_.bar')
# Over 63 octets in part
assert not is_valid_hostname('a.abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMN'
'OPQRSTUVWXYZ0123456789_1.bar')
len255 = ('a' * 62 + '.') * 4 + 'abc'
assert is_valid_hostname(len255)
assert not is_valid_hostname(len255 + 'd')

131
tests/server/test_compaction.py

@ -0,0 +1,131 @@
# Test of compaction code in server/db.py
import array
from collections import defaultdict
from os import environ, urandom
from struct import pack
import random
from lib.hash import hash_to_str
from server.env import Env
from server.db import DB
def create_histories(db, hashX_count=100):
'''Creates a bunch of random transaction histories, and write them
to disk in a series of small flushes.'''
hashXs = [urandom(db.coin.HASHX_LEN) for n in range(hashX_count)]
mk_array = lambda : array.array('I')
histories = {hashX : mk_array() for hashX in hashXs}
this_history = defaultdict(mk_array)
tx_num = 0
while hashXs:
hash_indexes = set(random.randrange(len(hashXs))
for n in range(1 + random.randrange(4)))
for index in hash_indexes:
histories[hashXs[index]].append(tx_num)
this_history[hashXs[index]].append(tx_num)
tx_num += 1
# Occasionally flush and drop a random hashX if non-empty
if random.random() < 0.1:
db.flush_history(this_history)
this_history.clear()
index = random.randrange(0, len(hashXs))
if histories[hashXs[index]]:
del hashXs[index]
return histories
def check_hashX_compaction(db):
db.max_hist_row_entries = 40
row_size = db.max_hist_row_entries * 4
full_hist = array.array('I', range(100)).tobytes()
hashX = urandom(db.coin.HASHX_LEN)
pairs = ((1, 20), (26, 50), (56, 30))
cum = 0
hist_list = []
hist_map = {}
for flush_count, count in pairs:
key = hashX + pack('>H', flush_count)
hist = full_hist[cum * 4: (cum+count) * 4]
hist_map[key] = hist
hist_list.append(hist)
cum += count
write_items = []
keys_to_delete = set()
write_size = db._compact_hashX(hashX, hist_map, hist_list,
write_items, keys_to_delete)
# Check results for sanity
assert write_size == len(full_hist)
assert len(write_items) == 3
assert len(keys_to_delete) == 3
assert len(hist_map) == len(pairs)
for n, item in enumerate(write_items):
assert item == (hashX + pack('>H', n),
full_hist[n * row_size: (n + 1) * row_size])
for flush_count, count in pairs:
assert hashX + pack('>H', flush_count) in keys_to_delete
# Check re-compaction is null
hist_map = {key: value for key, value in write_items}
hist_list = [value for key, value in write_items]
write_items.clear()
keys_to_delete.clear()
write_size = db._compact_hashX(hashX, hist_map, hist_list,
write_items, keys_to_delete)
assert write_size == 0
assert len(write_items) == 0
assert len(keys_to_delete) == 0
assert len(hist_map) == len(pairs)
# Check re-compaction adding a single tx writes the one row
hist_list[-1] += array.array('I', [100]).tobytes()
write_size = db._compact_hashX(hashX, hist_map, hist_list,
write_items, keys_to_delete)
assert write_size == len(hist_list[-1])
assert write_items == [(hashX + pack('>H', 2), hist_list[-1])]
assert len(keys_to_delete) == 1
assert write_items[0][0] in keys_to_delete
assert len(hist_map) == len(pairs)
def check_written(db, histories):
for hashX, hist in histories.items():
db_hist = array.array('I', db.get_history_txnums(hashX, limit=None))
assert hist == db_hist
def compact_history(db):
'''Synchronously compact the DB history.'''
db.first_sync = False
db.comp_cursor = 0
db.comp_flush_count = max(db.comp_flush_count, 1)
limit = 5 * 1000
write_size = 0
while db.comp_cursor != -1:
write_size += db._compact_history(limit)
assert write_size != 0
def run_test(db_dir):
environ.clear()
environ['DB_DIRECTORY'] = db_dir
environ['DAEMON_URL'] = ''
env = Env()
db = DB(env)
# Test abstract compaction
check_hashX_compaction(db)
# Now test with random data
histories = create_histories(db)
check_written(db, histories)
compact_history(db)
check_written(db, histories)
def test_compaction(tmpdir):
db_dir = str(tmpdir)
print('Temp dir: {}'.format(db_dir))
run_test(db_dir)

282
tests/server/test_env.py

@ -0,0 +1,282 @@
# Tests of server/env.py
import os
import random
import pytest
from server.env import Env, NetIdentity
import lib.coins as lib_coins
BASE_DAEMON_URL = 'http://username:password@hostname:321/'
BASE_DB_DIR = '/some/dir'
base_environ = {
'DB_DIRECTORY': BASE_DB_DIR,
'DAEMON_URL': BASE_DAEMON_URL,
}
def setup_base_env():
os.environ.clear()
os.environ.update(base_environ)
def assert_required(env_var):
setup_base_env()
os.environ.pop(env_var, None)
with pytest.raises(Env.Error):
Env()
def assert_default(env_var, attr, default):
setup_base_env()
e = Env()
assert getattr(e, attr) == default
os.environ[env_var] = 'foo'
e = Env()
assert getattr(e, attr) == 'foo'
def assert_integer(env_var, attr, default=''):
if default != '':
e = Env()
assert getattr(e, attr) == default
value = random.randrange(5, 2000)
os.environ[env_var] = str(value) + '.1'
with pytest.raises(Env.Error):
Env()
os.environ[env_var] = str(value)
e = Env()
assert getattr(e, attr) == value
def assert_boolean(env_var, attr, default):
e = Env()
assert getattr(e, attr) == default
os.environ[env_var] = 'foo'
e = Env()
assert getattr(e, attr) == True
os.environ[env_var] = ''
e = Env()
assert getattr(e, attr) == False
def test_minimal():
setup_base_env()
Env()
def test_DB_DIRECTORY():
assert_required('DB_DIRECTORY')
setup_base_env()
e = Env()
assert e.db_dir == BASE_DB_DIR
def test_DAEMON_URL():
assert_required('DAEMON_URL')
setup_base_env()
e = Env()
assert e.daemon_url == BASE_DAEMON_URL
def test_COIN_NET():
'''Test COIN and NET defaults and redirection.'''
setup_base_env()
e = Env()
assert e.coin == lib_coins.Bitcoin
os.environ['NET'] = 'testnet'
e = Env()
assert e.coin == lib_coins.BitcoinTestnet
os.environ.pop('NET')
os.environ['COIN'] = 'Litecoin'
e = Env()
assert e.coin == lib_coins.Litecoin
os.environ['NET'] = 'testnet'
e = Env()
assert e.coin == lib_coins.LitecoinTestnet
def test_CACHE_MB():
assert_integer('CACHE_MB', 'cache_MB', 1200)
def test_HOST():
assert_default('HOST', 'host', 'localhost')
def test_REORG_LIMIT():
assert_integer('REORG_LIMIT', 'reorg_limit', lib_coins.Bitcoin.REORG_LIMIT)
def test_TCP_PORT():
assert_integer('TCP_PORT', 'tcp_port', None)
def test_SSL_PORT():
# Requires both SSL_CERTFILE and SSL_KEYFILE to be set
os.environ['SSL_PORT'] = '50002'
os.environ['SSL_CERTFILE'] = 'certfile'
with pytest.raises(Env.Error):
Env()
os.environ.pop('SSL_CERTFILE')
os.environ['SSL_KEYFILE'] = 'keyfile'
with pytest.raises(Env.Error):
Env()
os.environ['SSL_CERTFILE'] = 'certfile'
os.environ.pop('SSL_PORT')
assert_integer('SSL_PORT', 'ssl_port', None)
def test_RPC_PORT():
assert_integer('RPC_PORT', 'rpc_port', 8000)
def test_MAX_SUBSCRIPTIONS():
assert_integer('MAX_SUBSCRIPTIONS', 'max_subscriptions', 10000)
def test_LOG_SESSIONS():
assert_integer('LOG_SESSIONS', 'log_sessions', 3600)
def test_DONATION_ADDRESS():
assert_default('DONATION_ADDRESS', 'donation_address', '')
def test_DB_ENGINE():
assert_default('DB_ENGINE', 'db_engine', 'leveldb')
def test_MAX_SEND():
assert_integer('MAX_SEND', 'max_send', 1000000)
def test_MAX_SUBS():
assert_integer('MAX_SUBS', 'max_subs', 250000)
def test_MAX_SESSION_SUBS():
assert_integer('MAX_SESSION_SUBS', 'max_session_subs', 50000)
def test_BANDWIDTH_LIMIT():
assert_integer('BANDWIDTH_LIMIT', 'bandwidth_limit', 2000000)
def test_SESSION_TIMEOUT():
assert_integer('SESSION_TIMEOUT', 'session_timeout', 600)
def test_BANNER_FILE():
e = Env()
assert e.banner_file is None
assert e.tor_banner_file is None
os.environ['BANNER_FILE'] = 'banner_file'
e = Env()
assert e.banner_file == 'banner_file'
assert e.tor_banner_file == 'banner_file'
os.environ['TOR_BANNER_FILE'] = 'tor_banner_file'
e = Env()
assert e.banner_file == 'banner_file'
assert e.tor_banner_file == 'tor_banner_file'
def test_ANON_LOGS():
assert_boolean('ANON_LOGS', 'anon_logs', False)
def test_PEER_DISCOVERY():
assert_boolean('PEER_DISCOVERY', 'peer_discovery', True)
def test_PEER_ANNOUNCE():
assert_boolean('PEER_ANNOUNCE', 'peer_announce', True)
def test_FORCE_PROXY():
assert_boolean('FORCE_PROXY', 'force_proxy', False)
def test_TOR_PROXY_HOST():
assert_default('TOR_PROXY_HOST', 'tor_proxy_host', 'localhost')
def test_TOR_PROXY_PORT():
assert_integer('TOR_PROXY_PORT', 'tor_proxy_port', None)
def test_IRC():
assert_boolean('IRC', 'irc', False)
def test_IRC_NICK():
assert_default('IRC_NICK', 'irc_nick', None)
def test_clearnet_identity():
os.environ['REPORT_TCP_PORT'] = '456'
e = Env()
assert len(e.identities) == 0
os.environ['REPORT_HOST'] = '8.8.8.8'
e = Env()
assert len(e.identities) == 1
assert e.identities[0].host == '8.8.8.8'
os.environ['REPORT_HOST'] = 'localhost'
with pytest.raises(Env.Error):
Env()
os.environ['REPORT_HOST'] = ''
with pytest.raises(Env.Error):
Env()
os.environ['REPORT_HOST'] = '127.0.0.1'
with pytest.raises(Env.Error):
Env()
os.environ['REPORT_HOST'] = '0.0.0.0'
with pytest.raises(Env.Error):
Env()
os.environ['REPORT_HOST'] = '224.0.0.2'
with pytest.raises(Env.Error):
Env()
os.environ['REPORT_HOST'] = '$HOST'
with pytest.raises(Env.Error):
Env()
# Accept private IP, unless IRC or PEER_ANNOUNCE
os.environ.pop('IRC', None)
os.environ['PEER_ANNOUNCE'] = ''
os.environ['REPORT_HOST'] = '192.168.0.1'
os.environ['SSL_CERTFILE'] = 'certfile'
os.environ['SSL_KEYFILE'] = 'keyfile'
Env()
os.environ['IRC'] = 'OK'
with pytest.raises(Env.Error):
Env()
os.environ.pop('IRC', None)
os.environ['PEER_ANNOUNCE'] = 'OK'
with pytest.raises(Env.Error):
Env()
os.environ['REPORT_SSL_PORT'] = os.environ['REPORT_TCP_PORT']
with pytest.raises(Env.Error):
Env()
os.environ['REPORT_SSL_PORT'] = '457'
os.environ['REPORT_HOST'] = 'foo.com'
e = Env()
assert len(e.identities) == 1
ident = e.identities[0]
assert ident.host == 'foo.com'
assert ident.tcp_port == 456
assert ident.ssl_port == 457
assert ident.nick_suffix == ''
def test_tor_identity():
tor_host = 'something.onion'
os.environ.pop('REPORT_HOST', None)
os.environ.pop('REPORT_HOST_TOR', None)
e = Env()
assert len(e.identities) == 0
os.environ['REPORT_HOST_TOR'] = 'foo'
os.environ['REPORT_SSL_PORT_TOR'] = '123'
os.environ['TCP_PORT'] = '456'
with pytest.raises(Env.Error):
Env()
os.environ['REPORT_HOST_TOR'] = tor_host
e = Env()
assert len(e.identities) == 1
ident = e.identities[0]
assert ident.host == tor_host
assert ident.tcp_port == 456
assert ident.ssl_port == 123
assert ident.nick_suffix == '_tor'
os.environ['REPORT_TCP_PORT_TOR'] = os.environ['REPORT_SSL_PORT_TOR']
with pytest.raises(Env.Error):
Env()
os.environ['REPORT_HOST'] = 'foo.com'
os.environ['TCP_PORT'] = '456'
os.environ['SSL_PORT'] = '789'
os.environ['REPORT_TCP_PORT'] = '654'
os.environ['REPORT_SSL_PORT'] = '987'
os.environ['SSL_CERTFILE'] = 'certfile'
os.environ['SSL_KEYFILE'] = 'keyfile'
os.environ.pop('REPORT_TCP_PORT_TOR', None)
os.environ.pop('REPORT_SSL_PORT_TOR', None)
e = Env()
assert len(e.identities) == 2
ident = e.identities[1]
assert ident.host == tor_host
assert ident.tcp_port == 654
assert ident.ssl_port == 987
os.environ['REPORT_TCP_PORT_TOR'] = '234'
os.environ['REPORT_SSL_PORT_TOR'] = '432'
e = Env()
assert len(e.identities) == 2
ident = e.identities[1]
assert ident.host == tor_host
assert ident.tcp_port == 234
assert ident.ssl_port == 432
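# assert_integer, assert_boolean and assert_default are helpers defined
# earlier in this file; a minimal sketch of the pattern they share (an
# assumed illustration, not the actual definitions):
def _assert_env_default(envvar, attr, default):
os.environ.pop(envvar, None)
e = Env()
# with the variable unset, Env must fall back to the documented default
assert getattr(e, attr) == default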

0
tests/test_storage.py → tests/server/test_storage.py

68
tests/test_blocks.py

@@ -0,0 +1,68 @@
# Copyright (c) 2017, the ElectrumX authors
#
# All rights reserved.
#
# The MIT License (MIT)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# and warranty status of this software.
import json
import os
from binascii import unhexlify
import pytest
from lib.coins import Coin
from lib.hash import hex_str_to_hash
BLOCKS_DIR = os.path.join(
os.path.dirname(os.path.realpath(__file__)), 'blocks')
# Load the block test vectors from tests/blocks
# Each filename encodes the coin and network the block belongs to
blocks = []
for name in os.listdir(BLOCKS_DIR):
try:
name_parts = name.split("_")
coin = Coin.lookup_coin_class(name_parts[0], name_parts[1])
with open(os.path.join(BLOCKS_DIR, name)) as f:
blocks.append((coin, json.load(f)))
except Exception as e:
# pytest.fail() must not be called at collection time; record the
# error so the matching test case below reports it instead
blocks.append((name, e))
@pytest.fixture(params=blocks)
def block_details(request):
return request.param
def test_block(block_details):
coin, block_info = block_details
if isinstance(block_info, Exception):
pytest.fail('could not load block file {}: {}'.format(coin, block_info))
block = unhexlify(block_info['block'])
h, txs = coin.block_full(block, block_info['height'])
assert coin.header_hash(h) == hex_str_to_hash(block_info['hash'])
assert coin.header_prevhash(h) == hex_str_to_hash(block_info['previousblockhash'])
for n, tx in enumerate(txs):
_, txid = tx
assert txid == hex_str_to_hash(block_info['tx'][n])
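# Each file in tests/blocks is named <coin>_<network>_<height>.json.  As
# the assertions above imply, a fixture holds at least these fields (a
# sketch of the shape, not a complete schema):
#
#   {
#     "hash": "<block hash, hex>",
#     "previousblockhash": "<parent block hash, hex>",
#     "height": 100000,
#     "block": "<raw block, hex>",
#     "tx": ["<txid>", "..."]
#   }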

397
tests/wallet/test_bip32.py

@@ -0,0 +1,397 @@
#
# Tests of wallet/bip32.py
#
import pytest
import wallet.bip32 as bip32
from lib.coins import Bitcoin, CoinError
from lib.hash import Base58
MXPRV = 'xprv9s21ZrQH143K2gMVrSwwojnXigqHgm1khKZGTCm7K8w4PmuDEUrudk11ZBxhGPUiUeVcrfGLoZmt8rFNRDLp18jmKMcVma89z7PJd2Vn7R9'
MPRIVKEY = b';\xf4\xbfH\xd20\xea\x94\x01_\x10\x1b\xc3\xb0\xff\xc9\x17$?K\x02\xe5\x82R\xe5\xb3A\xdb\x87&E\x00'
MXPUB = 'xpub661MyMwAqRbcFARxxUUxAsjGGifn6Djc4YUsFbAisUU3GaEMn2BABYKVQTHrDtwvSfgY2bK8aFGyCNmB52SKjkFGP18sSRTNn1sCeez7Utd'
mpubkey, mpubcoin = bip32.from_extended_key_string(MXPUB)
mprivkey, mprivcoin = bip32.from_extended_key_string(MXPRV)
def test_from_extended_key():
# Tests the failure modes of _from_extended_key.
with pytest.raises(TypeError):
bip32._from_extended_key('')
with pytest.raises(ValueError):
bip32._from_extended_key(b'')
with pytest.raises(CoinError):
bip32._from_extended_key(bytes(78))
# Invalid prefix byte
raw = Base58.decode_check(MXPRV)
with pytest.raises(ValueError):
bip32._from_extended_key(raw[:45] + b'\1' + raw[46:])
class TestPubKey(object):
def test_constructor(self):
cls = bip32.PubKey
raw_pubkey = b'\2' * 33
chain_code = bytes(32)
# Invalid constructions
with pytest.raises(TypeError):
cls(' ' * 33, chain_code, 0, 0)
with pytest.raises(ValueError):
cls(bytes(32), chain_code, -1, 0)
with pytest.raises(ValueError):
cls(bytes(33), chain_code, -1, 0)
with pytest.raises(ValueError):
cls(chain_code, chain_code, 0, 0)
with pytest.raises(TypeError):
cls(raw_pubkey, '0' * 32, 0, 0)
with pytest.raises(ValueError):
cls(raw_pubkey, bytes(31), 0, 0)
with pytest.raises(ValueError):
cls(raw_pubkey, chain_code, -1, 0)
with pytest.raises(ValueError):
cls(raw_pubkey, chain_code, 1 << 32, 0)
with pytest.raises(ValueError):
cls(raw_pubkey, chain_code, 0, -1)
with pytest.raises(ValueError):
cls(raw_pubkey, chain_code, 0, 256)
with pytest.raises(ValueError):
cls(b'\0' + b'\2' * 32, chain_code, 0, 0)
# These are OK
cls(b'\2' + b'\2' * 32, chain_code, 0, 0)
cls(b'\3' + b'\2' * 32, chain_code, 0, 0)
cls(raw_pubkey, chain_code, (1 << 32) - 1, 0)
cls(raw_pubkey, chain_code, 0, 255)
cls(raw_pubkey, chain_code, 0, 255, mpubkey)
# Construction from verifying key
dup = cls(mpubkey.verifying_key, chain_code, 0, 0)
assert mpubkey.ec_point() == dup.ec_point()
# Construction from raw pubkey bytes
pubkey = mpubkey.pubkey_bytes
dup = cls(pubkey, chain_code, 0, 0)
assert mpubkey.ec_point() == dup.ec_point()
# Construction from PubKey
with pytest.raises(TypeError):
cls(mpubkey, chain_code, 0, 0)
def test_from_extended_key_string(self):
assert mpubcoin == Bitcoin
assert mpubkey.n == 0
assert mpubkey.depth == 0
assert mpubkey.parent is None
assert mpubkey.chain_code == b'>V\x83\x92`\r\x17\xb3"\xa6\x7f\xaf\xc0\x930\xf7\x1e\xdc\x12i\x9c\xe4\xc0,a\x1a\x04\xec\x16\x19\xaeK'
assert mpubkey.ec_point().x() == 44977109961578369385937116592536468905742111247230478021459394832226142714624
def test_extended_key(self):
# Test argument validation
with pytest.raises(TypeError):
mpubkey._extended_key('foot', bytes(33))
with pytest.raises(ValueError):
mpubkey._extended_key(b'foo', bytes(33))
with pytest.raises(TypeError):
mpubkey._extended_key(bytes(4), ' ' * 33)
with pytest.raises(ValueError):
mpubkey._extended_key(b'foot', bytes(32))
mpubkey._extended_key(b'foot', bytes(33))
def test_extended_key_string(self):
# Implicitly tests extended_key()
assert mpubkey.extended_key_string(Bitcoin) == MXPUB
chg_master = mpubkey.child(1)
chg5 = chg_master.child(5)
assert chg5.address(Bitcoin) == '1BsEFqGtcZnVBbPeimcfAFTitQdTLvUXeX'
ext_key_base58 = chg5.extended_key_string(Bitcoin)
assert ext_key_base58 == 'xpub6AzPNZ1SAS7zmSnj6gakQ6tAKPzRVdQzieL3eCnoeT3A89nJaJKuUYWoZuYp8xWhCs1gF9yXAwGg7zKYhvCfhk9jrb1bULhLkQCwtB1Nnn1'
# Check can recreate
dup, coin = bip32.from_extended_key_string(ext_key_base58)
assert coin is Bitcoin
assert dup.chain_code == chg5.chain_code
assert dup.n == chg5.n == 5
assert dup.depth == chg5.depth == 2
assert dup.ec_point() == chg5.ec_point()
def test_child(self):
'''Test child derivations agree with Electrum.'''
rec_master = mpubkey.child(0)
assert rec_master.address(Bitcoin) == '18zW4D1Vxx9jVPGzsFzgXj8KrSLHt7w2cg'
chg_master = mpubkey.child(1)
assert chg_master.parent is mpubkey
assert chg_master.address(Bitcoin) == '1G8YpbkZd7bySHjpdQK3kMcHhc6BvHr5xy'
rec0 = rec_master.child(0)
assert rec0.address(Bitcoin) == '13nASW7rdE2dnSycrAP9VePhRmaLg9ziaw'
rec19 = rec_master.child(19)
assert rec19.address(Bitcoin) == '15QrXnPQ8aS8yCpA5tJkyvXfXpw8F8k3fB'
chg0 = chg_master.child(0)
assert chg0.parent is chg_master
assert chg0.address(Bitcoin) == '1L6fNSVhWjuMKNDigA99CweGEWtcqqhzDj'
with pytest.raises(ValueError):
mpubkey.child(-1)
with pytest.raises(ValueError):
mpubkey.child(1 << 31)
# OK
mpubkey.child((1 << 31) - 1)
def test_address(self):
assert mpubkey.address(Bitcoin) == '1ENCpq6mbb1KYcaodGG7eTpSpYvPnDjFmU'
def test_identifier(self):
assert mpubkey.identifier() == b'\x92\x9c=\xb8\xd6\xe7\xebR\x90Td\x85\x1c\xa7\x0c\x8aE`\x87\xdd'
def test_fingerprint(self):
assert mpubkey.fingerprint() == b'\x92\x9c=\xb8'
def test_parent_fingerprint(self):
assert mpubkey.parent_fingerprint() == bytes(4)
child = mpubkey.child(0)
assert child.parent_fingerprint() == mpubkey.fingerprint()
def test_pubkey_bytes(self):
# Also tests _exponent_to_bytes
pubkey = mpubkey.pubkey_bytes
assert pubkey == b'\x02cp$a\x18\xa7\xc2\x18\xfdUt\x96\xeb\xb2\xb0\x86-Y\xc6Hn\x88\xf8>\x07\xfd\x12\xce\x8a\x88\xfb\x00'
class TestPrivKey(object):
def test_constructor(self):
# Includes full tests of _signing_key_from_privkey and
# _privkey_secret_exponent
cls = bip32.PrivKey
chain_code = bytes(32)
# These are invalid
with pytest.raises(TypeError):
cls('0' * 32, chain_code, 0, 0)
with pytest.raises(ValueError):
cls(b'0' * 31, chain_code, 0, 0)
with pytest.raises(ValueError):
cls(MPRIVKEY, chain_code, -1, 0)
with pytest.raises(ValueError):
cls(MPRIVKEY, chain_code, 1 << 32, 0)
with pytest.raises(ValueError):
cls(MPRIVKEY, chain_code, 0, -1)
with pytest.raises(ValueError):
cls(MPRIVKEY, chain_code, 0, 256)
# Invalid exponents
with pytest.raises(ValueError):
cls(bip32._exponent_to_bytes(0), chain_code, 0, 0)
with pytest.raises(ValueError):
cls(bip32._exponent_to_bytes(cls.CURVE.order), chain_code, 0, 0)
# These are good
cls(MPRIVKEY, chain_code, 0, 0)
cls(MPRIVKEY, chain_code, (1 << 32) - 1, 0)
cls(bip32._exponent_to_bytes(cls.CURVE.order - 1), chain_code, 0, 0)
privkey = cls(MPRIVKEY, chain_code, 0, 255)
# Construction with bad parent
with pytest.raises(TypeError):
cls(MPRIVKEY, chain_code, 0, 0, privkey.public_key)
# Construction from signing key
dup = cls(privkey.signing_key, chain_code, 0, 0)
assert dup.ec_point() == privkey.ec_point()
# Construction from PrivKey
with pytest.raises(TypeError):
cls(privkey, chain_code, 0, 0)
def test_secret_exponent(self):
assert mprivkey.secret_exponent() == 27118888947022743980605817563635166434451957861641813930891160184742578898176
def test_identifier(self):
assert mprivkey.identifier() == mpubkey.identifier()
def test_address(self):
assert mprivkey.address(Bitcoin) == mpubkey.address(Bitcoin)
def test_fingerprint(self):
assert mprivkey.fingerprint() == mpubkey.fingerprint()
def test_parent_fingerprint(self):
assert mprivkey.parent_fingerprint() == bytes(4)
child = mprivkey.child(0)
assert child.parent_fingerprint() == mprivkey.fingerprint()
def test_from_extended_key_string(self):
# Also tests privkey_bytes and public_key
assert mprivcoin is Bitcoin
assert mprivkey.privkey_bytes == MPRIVKEY
assert mprivkey.ec_point() == mpubkey.ec_point()
assert mprivkey.public_key.chain_code == mpubkey.chain_code
assert mprivkey.public_key.n == mpubkey.n
assert mprivkey.public_key.depth == mpubkey.depth
def test_extended_key(self):
# Test argument validation
with pytest.raises(TypeError):
mprivkey._extended_key('foot', bytes(33))
with pytest.raises(ValueError):
mprivkey._extended_key(b'foo', bytes(33))
with pytest.raises(TypeError):
mprivkey._extended_key(bytes(4), ' ' * 33)
with pytest.raises(ValueError):
mprivkey._extended_key(b'foot', bytes(32))
mprivkey._extended_key(b'foot', bytes(33))
def test_extended_key_string(self):
# Also tests extended_key, WIF and privkey_bytes
assert mprivkey.extended_key_string(Bitcoin) == MXPRV
chg_master = mprivkey.child(1)
chg5 = chg_master.child(5)
assert chg5.WIF(Bitcoin) == 'L5kTYMuajTGWdYiMoD4V8k6LS4Bg3HFMA5UGTfxG9Wh7UKu9CHFC'
ext_key_base58 = chg5.extended_key_string(Bitcoin)
assert ext_key_base58 == 'xprv9x12y3UYL4ZhYxiFzf3k2xwRmN9w6Ah9MRQSqpPC67WBFMTA2m1evkCKidz7UYBa5i8QwxmU9Ju7giqEmcPRXKXwzgAJwssNeZNQLPT3LAY'
# Check can recreate
dup, coin = bip32.from_extended_key_string(ext_key_base58)
assert coin is Bitcoin
assert dup.chain_code == chg5.chain_code
assert dup.n == chg5.n == 5
assert dup.depth == chg5.depth == 2
assert dup.ec_point() == chg5.ec_point()
def test_child(self):
'''Test child derivations agree with Electrum.'''
# Also tests WIF, address
rec_master = mprivkey.child(0)
assert rec_master.address(Bitcoin) == '18zW4D1Vxx9jVPGzsFzgXj8KrSLHt7w2cg'
chg_master = mprivkey.child(1)
assert chg_master.parent is mprivkey
assert chg_master.address(Bitcoin) == '1G8YpbkZd7bySHjpdQK3kMcHhc6BvHr5xy'
rec0 = rec_master.child(0)
assert rec0.WIF(Bitcoin) == 'L2M6WWMdu3YfWxvLGF76HZgHCA6idwVQx5QL91vfdqeZi8XAgWkz'
rec19 = rec_master.child(19)
assert rec19.WIF(Bitcoin) == 'KwMHa1fynU2J2iBGCuBZxumM2qDXHe5tVPU9VecNGQv3UCqnET7X'
chg0 = chg_master.child(0)
assert chg0.parent is chg_master
assert chg0.WIF(Bitcoin) == 'L4J1esD4rYuBHXwjg72yi7Rw4G3iF2yUHt7LN9trpC3snCppUbq8'
with pytest.raises(ValueError):
mprivkey.child(-1)
with pytest.raises(ValueError):
mprivkey.child(1 << 32)
# OK
mprivkey.child((1 << 32) - 1)
class TestVectors():
def test_vector1(self):
seed = bytes.fromhex("000102030405060708090a0b0c0d0e0f")
# Chain m
m = bip32.PrivKey.from_seed(seed)
xprv = m.extended_key_string(Bitcoin)
assert xprv == "xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi"
xpub = m.public_key.extended_key_string(Bitcoin)
assert xpub == "xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8"
# Chain m/0H
m1 = m.child(0 + m.HARDENED)
xprv = m1.extended_key_string(Bitcoin)
assert xprv == "xprv9uHRZZhk6KAJC1avXpDAp4MDc3sQKNxDiPvvkX8Br5ngLNv1TxvUxt4cV1rGL5hj6KCesnDYUhd7oWgT11eZG7XnxHrnYeSvkzY7d2bhkJ7"
xpub = m1.public_key.extended_key_string(Bitcoin)
assert xpub == "xpub68Gmy5EdvgibQVfPdqkBBCHxA5htiqg55crXYuXoQRKfDBFA1WEjWgP6LHhwBZeNK1VTsfTFUHCdrfp1bgwQ9xv5ski8PX9rL2dZXvgGDnw"
# Chain m/0H/1
m2 = m1.child(1)
xprv = m2.extended_key_string(Bitcoin)
assert xprv == "xprv9wTYmMFdV23N2TdNG573QoEsfRrWKQgWeibmLntzniatZvR9BmLnvSxqu53Kw1UmYPxLgboyZQaXwTCg8MSY3H2EU4pWcQDnRnrVA1xe8fs"
xpub = m2.public_key.extended_key_string(Bitcoin)
assert xpub == "xpub6ASuArnXKPbfEwhqN6e3mwBcDTgzisQN1wXN9BJcM47sSikHjJf3UFHKkNAWbWMiGj7Wf5uMash7SyYq527Hqck2AxYysAA7xmALppuCkwQ"
# Chain m/0H/1/2H
m3 = m2.child(2 + m.HARDENED)
xprv = m3.extended_key_string(Bitcoin)
assert xprv == "xprv9z4pot5VBttmtdRTWfWQmoH1taj2axGVzFqSb8C9xaxKymcFzXBDptWmT7FwuEzG3ryjH4ktypQSAewRiNMjANTtpgP4mLTj34bhnZX7UiM"
xpub = m3.public_key.extended_key_string(Bitcoin)
assert xpub == "xpub6D4BDPcP2GT577Vvch3R8wDkScZWzQzMMUm3PWbmWvVJrZwQY4VUNgqFJPMM3No2dFDFGTsxxpG5uJh7n7epu4trkrX7x7DogT5Uv6fcLW5"
# Chain m/0H/1/2H/2
m4 = m3.child(2)
xprv = m4.extended_key_string(Bitcoin)
assert xprv == "xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334"
xpub = m4.public_key.extended_key_string(Bitcoin)
assert xpub == "xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV"
# Chain m/0H/1/2H/2/1000000000
m5 = m4.child(1000000000)
xprv = m5.extended_key_string(Bitcoin)
assert xprv == "xprvA41z7zogVVwxVSgdKUHDy1SKmdb533PjDz7J6N6mV6uS3ze1ai8FHa8kmHScGpWmj4WggLyQjgPie1rFSruoUihUZREPSL39UNdE3BBDu76"
xpub = m5.public_key.extended_key_string(Bitcoin)
assert xpub == "xpub6H1LXWLaKsWFhvm6RVpEL9P4KfRZSW7abD2ttkWP3SSQvnyA8FSVqNTEcYFgJS2UaFcxupHiYkro49S8yGasTvXEYBVPamhGW6cFJodrTHy"
def test_vector2(self):
seed = bytes.fromhex("fffcf9f6f3f0edeae7e4e1dedbd8d5d2cfccc9c6c3c0bdbab7b4b1aeaba8a5a29f9c999693908d8a8784817e7b7875726f6c696663605d5a5754514e4b484542")
# Chain m
m = bip32.PrivKey.from_seed(seed)
xprv = m.extended_key_string(Bitcoin)
assert xprv == "xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U"
xpub = m.public_key.extended_key_string(Bitcoin)
assert xpub == "xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB"
# Chain m/0
m1 = m.child(0)
xprv = m1.extended_key_string(Bitcoin)
assert xprv == "xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt"
xpub = m1.public_key.extended_key_string(Bitcoin)
assert xpub == "xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH"
# Chain m/0/2147483647H
m2 = m1.child(2147483647 + m.HARDENED)
xprv = m2.extended_key_string(Bitcoin)
assert xprv == "xprv9wSp6B7kry3Vj9m1zSnLvN3xH8RdsPP1Mh7fAaR7aRLcQMKTR2vidYEeEg2mUCTAwCd6vnxVrcjfy2kRgVsFawNzmjuHc2YmYRmagcEPdU9"
xpub = m2.public_key.extended_key_string(Bitcoin)
assert xpub == "xpub6ASAVgeehLbnwdqV6UKMHVzgqAG8Gr6riv3Fxxpj8ksbH9ebxaEyBLZ85ySDhKiLDBrQSARLq1uNRts8RuJiHjaDMBU4Zn9h8LZNnBC5y4a"
# Chain m/0/2147483647H/1
m3 = m2.child(1)
xprv = m3.extended_key_string(Bitcoin)
xpub = m3.public_key.extended_key_string(Bitcoin)
assert xprv == "xprv9zFnWC6h2cLgpmSA46vutJzBcfJ8yaJGg8cX1e5StJh45BBciYTRXSd25UEPVuesF9yog62tGAQtHjXajPPdbRCHuWS6T8XA2ECKADdw4Ef"
assert xpub == "xpub6DF8uhdarytz3FWdA8TvFSvvAh8dP3283MY7p2V4SeE2wyWmG5mg5EwVvmdMVCQcoNJxGoWaU9DCWh89LojfZ537wTfunKau47EL2dhHKon"
# Chain m/0/2147483647H/1/2147483646H
m4 = m3.child(2147483646 + m.HARDENED)
xprv = m4.extended_key_string(Bitcoin)
xpub = m4.public_key.extended_key_string(Bitcoin)
assert xprv == "xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc"
assert xpub == "xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL"
# Chain m/0/2147483647H/1/2147483646H/2
m5 = m4.child(2)
xprv = m5.extended_key_string(Bitcoin)
xpub = m5.public_key.extended_key_string(Bitcoin)
assert xprv == "xprvA2nrNbFZABcdryreWet9Ea4LvTJcGsqrMzxHx98MMrotbir7yrKCEXw7nadnHM8Dq38EGfSh6dqA9QWTyefMLEcBYJUuekgW4BYPJcr9E7j"
assert xpub == "xpub6FnCn6nSzZAw5Tw7cgR9bi15UV96gLZhjDstkXXxvCLsUXBGXPdSnLFbdpq8p9HmGsApME5hQTZ3emM2rnY5agb9rXpVGyy3bdW6EEgAtqt"
def test_vector3(self):
seed = bytes.fromhex("4b381541583be4423346c643850da4b320e46a87ae3d2a4e6da11eba819cd4acba45d239319ac14f863b8d5ab5a0d0c64d2e8a1e7d1457df2e5a3c51c73235be")
# Chain m
m = bip32.PrivKey.from_seed(seed)
xprv = m.extended_key_string(Bitcoin)
xpub = m.public_key.extended_key_string(Bitcoin)
assert xprv == "xprv9s21ZrQH143K25QhxbucbDDuQ4naNntJRi4KUfWT7xo4EKsHt2QJDu7KXp1A3u7Bi1j8ph3EGsZ9Xvz9dGuVrtHHs7pXeTzjuxBrCmmhgC6"
assert xpub == "xpub661MyMwAqRbcEZVB4dScxMAdx6d4nFc9nvyvH3v4gJL378CSRZiYmhRoP7mBy6gSPSCYk6SzXPTf3ND1cZAceL7SfJ1Z3GC8vBgp2epUt13"
# Chain m/0H
m1 = m.child(0 + m.HARDENED)
xprv = m1.extended_key_string(Bitcoin)
xpub = m1.public_key.extended_key_string(Bitcoin)
assert xprv == "xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L"
assert xpub == "xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y"

306
wallet/bip32.py

@@ -0,0 +1,306 @@
# Copyright (c) 2017, Neil Booth
#
# All rights reserved.
#
# See the file "LICENCE" for information about the copyright
# and warranty status of this software.
'''Logic for BIP32 hierarchical deterministic key derivation.'''
import struct
import ecdsa
import ecdsa.ellipticcurve as EC
import ecdsa.numbertheory as NT
from lib.coins import Coin
from lib.hash import Base58, hmac_sha512, hash160
from lib.util import cachedproperty, bytes_to_int, int_to_bytes
class DerivationError(Exception):
'''Raised when an invalid derivation occurs.'''
class _KeyBase(object):
'''A BIP32 Key, public or private.'''
CURVE = ecdsa.SECP256k1
def __init__(self, chain_code, n, depth, parent):
if not isinstance(chain_code, (bytes, bytearray)):
raise TypeError('chain code must be raw bytes')
if len(chain_code) != 32:
raise ValueError('invalid chain code')
if not 0 <= n < 1 << 32:
raise ValueError('invalid child number')
if not 0 <= depth < 256:
raise ValueError('invalid depth')
if parent is not None:
if not isinstance(parent, type(self)):
raise TypeError('parent key has bad type')
self.chain_code = chain_code
self.n = n
self.depth = depth
self.parent = parent
def _hmac_sha512(self, msg):
'''Use SHA-512 to provide an HMAC, returned as a pair of 32-byte
objects.
'''
hmac = hmac_sha512(self.chain_code, msg)
return hmac[:32], hmac[32:]
def _extended_key(self, ver_bytes, raw_serkey):
'''Return the 78-byte extended key given prefix version bytes and
serialized key bytes.
'''
if not isinstance(ver_bytes, (bytes, bytearray)):
raise TypeError('ver_bytes must be raw bytes')
if len(ver_bytes) != 4:
raise ValueError('ver_bytes must have length 4')
if not isinstance(raw_serkey, (bytes, bytearray)):
raise TypeError('raw_serkey must be raw bytes')
if len(raw_serkey) != 33:
raise ValueError('raw_serkey must have length 33')
return (ver_bytes + bytes([self.depth])
+ self.parent_fingerprint() + struct.pack('>I', self.n)
+ self.chain_code + raw_serkey)
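# Layout of the 78 bytes assembled above (per BIP32):
#   ver_bytes(4) || depth(1) || parent_fingerprint(4) ||
#   child_number(4, big-endian) || chain_code(32) || serialized_key(33)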
def fingerprint(self):
'''Return the key's fingerprint as 4 bytes.'''
return self.identifier()[:4]
def parent_fingerprint(self):
'''Return the parent key's fingerprint as 4 bytes.'''
return self.parent.fingerprint() if self.parent else bytes(4)
def extended_key_string(self, coin):
'''Return an extended key as a base58 string.'''
return Base58.encode_check(self.extended_key(coin))
class PubKey(_KeyBase):
'''A BIP32 public key.'''
def __init__(self, pubkey, chain_code, n, depth, parent=None):
super().__init__(chain_code, n, depth, parent)
if isinstance(pubkey, ecdsa.VerifyingKey):
self.verifying_key = pubkey
else:
self.verifying_key = self._verifying_key_from_pubkey(pubkey)
self.addresses = {}
@classmethod
def _verifying_key_from_pubkey(cls, pubkey):
'''Converts a 33-byte compressed pubkey into an ecdsa.VerifyingKey
object'''
if not isinstance(pubkey, (bytes, bytearray)):
raise TypeError('pubkey must be raw bytes')
if len(pubkey) != 33:
raise ValueError('pubkey must be 33 bytes')
if pubkey[0] not in (2, 3):
raise ValueError('invalid pubkey prefix byte')
curve = cls.CURVE.curve
is_odd = pubkey[0] == 3
x = bytes_to_int(pubkey[1:])
# p is the finite field order
a, b, p = curve.a(), curve.b(), curve.p()
y2 = pow(x, 3, p) + b
assert a == 0 # secp256k1 has a == 0; otherwise y2 += a * x
y = NT.square_root_mod_prime(y2 % p, p)
if bool(y & 1) != is_odd:
y = p - y
point = EC.Point(curve, x, y)
return ecdsa.VerifyingKey.from_public_point(point, curve=cls.CURVE)
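# Decompression above relies on the secp256k1 equation y^2 = x^3 + 7
# (a == 0, b == 7): recover y as a square root of x^3 + b mod p, then
# negate it if its parity does not match the prefix byte.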
@cachedproperty
def pubkey_bytes(self):
'''Return the compressed public key as 33 bytes.'''
point = self.verifying_key.pubkey.point
prefix = bytes([2 + (point.y() & 1)])
padded_bytes = _exponent_to_bytes(point.x())
return prefix + padded_bytes
def address(self, coin):
"The public key as a P2PKH address"
address = self.addresses.get(coin)
if not address:
address = coin.P2PKH_address_from_pubkey(self.pubkey_bytes)
self.addresses[coin] = address
return address
def ec_point(self):
return self.verifying_key.pubkey.point
def child(self, n):
'''Return the derived child extended pubkey at index N.'''
if not 0 <= n < (1 << 31):
raise ValueError('invalid BIP32 public key child number')
msg = self.pubkey_bytes + struct.pack('>I', n)
L, R = self._hmac_sha512(msg)
curve = self.CURVE
L = bytes_to_int(L)
if L >= curve.order:
raise DerivationError
point = curve.generator * L + self.ec_point()
if point == EC.INFINITY:
raise DerivationError
verkey = ecdsa.VerifyingKey.from_public_point(point, curve=curve)
return PubKey(verkey, R, n, self.depth + 1, self)
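# This is BIP32 CKDpub: child = parse256(I_L)*G + K_parent, failing if
# I_L reaches the curve order or the resulting point is infinity.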
def identifier(self):
'''Return the key's identifier as 20 bytes.'''
return hash160(self.pubkey_bytes)
def extended_key(self, coin):
'''Return a raw extended public key.'''
return self._extended_key(coin.XPUB_VERBYTES, self.pubkey_bytes)
class PrivKey(_KeyBase):
'''A BIP32 private key.'''
HARDENED = 1 << 31
def __init__(self, privkey, chain_code, n, depth, parent=None):
super().__init__(chain_code, n, depth, parent)
if isinstance(privkey, ecdsa.SigningKey):
self.signing_key = privkey
else:
self.signing_key = self._signing_key_from_privkey(privkey)
@classmethod
def _signing_key_from_privkey(cls, privkey):
'''Converts a 32-byte privkey into an ecdsa.SigningKey object.'''
exponent = cls._privkey_secret_exponent(privkey)
return ecdsa.SigningKey.from_secret_exponent(exponent, curve=cls.CURVE)
@classmethod
def _privkey_secret_exponent(cls, privkey):
'''Return the private key as a secret exponent if it is a valid private
key.'''
if not isinstance(privkey, (bytes, bytearray)):
raise TypeError('privkey must be raw bytes')
if len(privkey) != 32:
raise ValueError('privkey must be 32 bytes')
exponent = bytes_to_int(privkey)
if not 1 <= exponent < cls.CURVE.order:
raise ValueError('privkey represents an invalid exponent')
return exponent
@classmethod
def from_seed(cls, seed):
# BIP32 fixes the HMAC key as b'Bitcoin seed' for every coin; the
# string is not coin-specific
hmac = hmac_sha512(b'Bitcoin seed', seed)
privkey, chain_code = hmac[:32], hmac[32:]
return cls(privkey, chain_code, 0, 0)
@cachedproperty
def privkey_bytes(self):
'''Return the serialized private key (no leading zero byte).'''
return _exponent_to_bytes(self.secret_exponent())
@cachedproperty
def public_key(self):
'''Return the corresponding extended public key.'''
verifying_key = self.signing_key.get_verifying_key()
parent_pubkey = self.parent.public_key if self.parent else None
return PubKey(verifying_key, self.chain_code, self.n, self.depth,
parent_pubkey)
def ec_point(self):
return self.public_key.ec_point()
def secret_exponent(self):
'''Return the private key as a secret exponent.'''
return self.signing_key.privkey.secret_multiplier
def WIF(self, coin):
'''Return the private key encoded in Wallet Import Format.'''
return coin.privkey_WIF(self.privkey_bytes, compressed=True)
def address(self, coin):
"The public key as a P2PKH address"
return self.public_key.address(coin)
def child(self, n):
'''Return the derived child extended privkey at index N.'''
if not 0 <= n < (1 << 32):
raise ValueError('invalid BIP32 private key child number')
if n >= self.HARDENED:
serkey = b'\0' + self.privkey_bytes
else:
serkey = self.public_key.pubkey_bytes
msg = serkey + struct.pack('>I', n)
L, R = self._hmac_sha512(msg)
curve = self.CURVE
L = bytes_to_int(L)
exponent = (L + bytes_to_int(self.privkey_bytes)) % curve.order
if exponent == 0 or L >= curve.order:
raise DerivationError
privkey = _exponent_to_bytes(exponent)
return PrivKey(privkey, R, n, self.depth + 1, self)
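# Hardened derivation (n >= HARDENED) feeds 0x00 || privkey_bytes to the
# HMAC, so hardened children cannot be derived from the extended public
# key; non-hardened derivation feeds the compressed pubkey instead.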
def identifier(self):
'''Return the key's identifier as 20 bytes.'''
return self.public_key.identifier()
def extended_key(self, coin):
'''Return a raw extended private key.'''
return self._extended_key(coin.XPRV_VERBYTES,
b'\0' + self.privkey_bytes)
def _exponent_to_bytes(exponent):
'''Convert an exponent to 32 big-endian bytes'''
return (bytes(32) + int_to_bytes(exponent))[-32:]
def _from_extended_key(ekey):
'''Return a PubKey or PrivKey from an extended key raw bytes.'''
if not isinstance(ekey, (bytes, bytearray)):
raise TypeError('extended key must be raw bytes')
if len(ekey) != 78:
raise ValueError('extended key must have length 78')
is_public, coin = Coin.lookup_xverbytes(ekey[:4])
depth = ekey[4]
fingerprint = ekey[5:9] # Not used
n, = struct.unpack('>I', ekey[9:13])
chain_code = ekey[13:45]
if is_public:
pubkey = ekey[45:]
key = PubKey(pubkey, chain_code, n, depth)
else:
if ekey[45] != 0:
raise ValueError('invalid extended private key prefix byte')
privkey = ekey[46:]
key = PrivKey(privkey, chain_code, n, depth)
return key, coin
def from_extended_key_string(ekey_str):
'''Given an extended key string, such as
xpub6BsnM1W2Y7qLMiuhi7f7dbAwQZ5Cz5gYJCRzTNainXzQXYjFwtuQXHd
3qfi3t3KJtHxshXezfjft93w4UE7BGMtKwhqEHae3ZA7d823DVrL
return a (key, coin) pair. key is either a PubKey or PrivKey.
'''
return _from_extended_key(Base58.decode_check(ekey_str))
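# A round-trip usage sketch (ext_key_str is any valid base58 xpub/xprv
# for a coin the library knows, e.g. the one in the docstring above):
#
#   key, coin = from_extended_key_string(ext_key_str)
#   assert key.extended_key_string(coin) == ext_key_str
#   print(key.child(0).address(coin))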