
@@ -65,31 +65,3 @@

#Maximum number of address subscriptions across all sessions
#MAX_SESSION_SUBS = 50000

#Maximum number of address subscriptions permitted to a single session.

#If synchronizing from the Genesis block your performance might change
#by tweaking the following cache variables. Cache size is only checked
#roughly every minute, so the caches can grow beyond the specified
#size. Also the Python process is often quite a bit fatter than the
#combined cache size, because of Python overhead and also because
#leveldb consumes a lot of memory during UTXO flushing. So I recommend
#you set the sum of these to nothing over half your available physical
#RAM:

#HIST_MB = 300
#amount of history cache, in MB, to retain before flushing to
#disk. Default is 300; probably no benefit being much larger
#as history is append-only and not searched.

#UTXO_MB = 1000
#amount of UTXO and history cache, in MB, to retain before
#flushing to disk. Default is 1000. This may be too large
#for small boxes or too small for machines with lots of RAM.
#Larger caches generally perform better as there is
#significant searching of the UTXO cache during indexing.
#However, I don't see much benefit in my tests pushing this
#too high, and in fact performance begins to fall. My
#machine has 24GB RAM; the slow down is probably because of
#leveldb caching and Python GC effects. However this may be
#very dependent on hardware and you may have different
#results.