# [system]
# This is the path to your Node.js binary. Detect it with `which node`
EXEC_NODE=/your/path/to/node
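# For example, `which node` might print /usr/local/bin/node (the exact path depends on your install)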
# The path to the directory that ripper.js lives in
BASEPATH=/home/user/txRipper/
# The sync operation requires a direct, preferably same-machine, connection to a node
RPC_NODE=http://192.168.1.104:8545
# WebSocket endpoint of the same node (note the ws:// scheme; adjust the port to your node's websocket port)
RPC_NODE_WS=ws://192.168.1.104:8545
# The chain ID of the network your node is on - Mainnet = 1 https://chainlist.org/
CHAIN_ID=1
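# Other chain IDs, for reference (see chainlist.org): Sepolia = 11155111, Goerli = 5, Polygon = 137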
# [Logging]
# Log levels: 1: standard operation, 2: debug, 3: verbose, 4: objects
LOG_LEVEL=4
LOG_TO_FILE=true
LOG_FILE_LOCATION=/base/path/txTheRipper/derived/application/log.txt
# [Utilities]
# To use the "popular wallets" utilities, add your Etherscan API key here.
# To avoid getting banned, these utilities implement their own rate limiter, which
# keeps the app running more smoothly than leaning on Etherscan's own rate limit
# (5 requests per second). Increase with caution.
ETHERSCAN_API_KEY=YOURETHERSCANAPIKEYHERE
ETHERSCAN_REQ_RATE=2
# [Database Settings]
DB_USER=dbUser
DB_HOST=localhost
DB_NAME=dbName
DB_PASS=dbUserPassword
REDIS_URL=redis://127.0.0.1:6379
KICKSTARTDIR=/yourpathto/backup/txRipper/
# [Application]
# This is the number of confirmations that ripper should stay behind the node's chain head.
# To prevent reorgs from corrupting the data, keep this about 20 blocks back.
CONFIRMATIONS=20
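# For a rough sense of scale: at mainnet's ~12 second block time, 20 confirmations is about 4 minutes behind the head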
# The mode depends on how you use the application. For a "Full Archive" sync of the database, that is,
# to process each block and save transaction and topics data to the database, select "sync".
# To process each block and only provide subscription services, select "subscription".
OPERATION_MODE=sync
# [Memory and CPU Usage]
# Set this to false for a lower RAM footprint, at the cost of more disk writes
OPTIMIZE_DISK_WRITES=true
# Number of blocks to sync before writing to a JSON file. This is not a full DB commit, but
# it can be adjusted to fit the resources available/used on the given machine.
# NVMe? Higher disk usage OK = more commits = less memory used
COMMIT_EVERYN_BLOCKS=500
# Maximum number of bytes a JSON batch file may have.
# This is also roughly the size of the SQL file that gets generated.
# Recommended starting point here is 50 MB: 50000000
JSON_TX_FILE_MAX=50000000
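# For example, batches of roughly 100 MB would be JSON_TX_FILE_MAX=100000000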
# When a websocket is closed, wait this many seconds before attempting to reconnect
WS_RECONNECT_TIMEOUT=5
# Number of separate processes to spawn for CPU-intensive tasks.
# Set USE_MULTI_THREADS=false when you want to use fewer CPU resources.
USE_MULTI_THREADS=true
MULTI_THREADS=4
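# As a rule of thumb (not a project requirement), the machine's core count (e.g. `nproc` on Linux) is a reasonable starting value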
# Instead of delegating child jobs with files, use redis to pass data
USE_REDIS_DATA=true
# Uncomment to disable the index cache. Just leave it commented out unless you have a specific reason not to
# INDEX_CACHE_DISABLE=true
# Uncomment to skip installing the regular DB indexes (this renders the database effectively unusable for queries)
# DONT_INDEX=true
# The working and pause flags stop the script from executing on top of an already running instance.
# Normally this is fine, but there may be cases where the app was shut down unexpectedly,
# e.g. a power failure, and the app was left in an unhappy state.
# To automatically reset the pause and working flags on application restart, disable this or leave it commented out.
# DEV_STOP_FLAGS_ENABLE=true
# [subscriptions]
# (Work in progress) The subscriptions module allows events to be broadcast via either redis or a unix socket.
# While a block is being indexed and its trace read, the program can efficiently deliver
# notifications about such events. In the future a REST server will be built on the unix
# socket to allow external use of the API. You can use both connection methods, but because
# of sandboxing this takes up double the resources, which may matter when subscribed to many
# accounts, so it's recommended to pick one delivery method or the other.
# Globally disable all subscriptions. The tables will still be added to the database.
SUB_SUSPEND_ALL=false
# Subscription service delivery methods:
# Enable subscriptions based on the unix socket path below
SUB_USE_UNIX_SOCKET=true
SUB_UNIX_SOCKET=/path/to/txTheRipper/derived/application/ripper.sock
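# A minimal consumer sketch for the unix socket (illustrative only; it assumes events arrive as plain UTF-8 JSON on the stream):
#   const net = require('net');
#   const client = net.createConnection('/path/to/txTheRipper/derived/application/ripper.sock');
#   client.on('data', (buf) => console.log('event:', buf.toString()));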
# Enable subscriptions where notifications are delivered via the redis connection configured above.
SUB_USE_REDIS=true
# For all delivery methods above, enable the supported services here.
# Accounts is a simple filter which delivers data when a particular account
# is found in the topics of a transaction.
SUB_TYPE_ACCOUNT=true