# .env.test
COMPOSE_PROJECT_NAME=brokernode
COMPOSE_CONVERT_WINDOWS_PATHS=true
APP_PORT=3000
# DB Creds
# Replace defaults with your own credentials if you want
DB_USER_DEV="root"
DB_PASSWORD_DEV="secret"
DB_NAME_DEV="dev"
DB_USER_TEST="root"
DB_PASSWORD_TEST="secret"
DB_NAME_TEST="test"
# Database URL
# Needed for the Aurora DBs
#DATABASE_URL="change/to/real/URL"
# Host IP
# Needed for IOTA; without the host IP your broker will not work
HOST_IP="127.0.0.1"
# Ethereum
# Without these you cannot receive PRL revenue
CHAIN_ID=559966
OYSTER_PEARL="b7baab5cad2d2ebfe75a500c288a4c02b74bc12c"
MAIN_WALLET_ADDRESS="919410005B53D6497517b9Ad58C23c6A30207747"
MAIN_WALLET_KEY="bc07ec20ceedff112f1498a63f6da115a78d6b26fb6ec282bf8b3f45e3358fdf"
MAIN_WALLET_PW="oysterby4000"
ETH_NODE_URL="http://54.86.134.172:8080"
# Test mode
# Set to one of the following options:
# PROD_MODE - Self-explanatory
# TEST_MODE_NO_TREASURE - No dummy treasures buried, no offsets
# TEST_MODE_DUMMY_TREASURE - Dummy treasures, chunk idx offsets
MODE="PROD_MODE"
# MODE="TEST_MODE_DUMMY_TREASURE"
# MODE="TEST_MODE_NO_TREASURE"
# Experimental
# May not need these
TEST_MODE_WALLET_ADDRESS="this_can_be_anything"
TEST_MODE_WALLET_KEY="9999999999999999999999999999999999999999999999999999999999999999"
TEST_MODE_ETH_NODE_URL="ws://lol:no"
# SEGMENT
SEGMENT_WRITE_KEY=""
# SENTRY
SENTRY_DSN=""
# Grifts
# Add .env variables associated with grifts here
GRIFT_ETH_PRIVATE_KEY="ba74b0e24739c1fa7e8470737635e6d894c1d6aefe98fa5b224e3a9f3139371a"
# Oyster Pays
# Set this to true if we are paying; otherwise leave it as an empty string
OYSTER_PAYS=""
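# Illustrative alternative (not in the original file), following the same
# commented-toggle convention as MODE above:
# OYSTER_PAYS="true"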
# Num chunks limit
# If there is a file size limit, define it here by constraining the number of
# chunks allowed. Use the number of chunks instead of the file size: the data_maps
# file uses num chunks to build the data map, so even if the client lies about num chunks
# to get past this limit, the upload will fail. 1 KB = 1 chunk, so a 5 MB file will
# have 5001 chunks (5000 file chunks + 1 metadata chunk).
# Use "unlimited" for unlimited numChunks.
NUM_CHUNKS_LIMIT="5001"
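# Worked example of the sizing rule above (illustrative value, not from the
# original config): at 1 KB per chunk, a ~10 MB cap is 10000 file chunks
# plus 1 metadata chunk:
# NUM_CHUNKS_LIMIT="10001"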
# Used by sentry.io to distinguish between endpoints.
# Use the prod_ prefix for prod instances.
DISPLAY_NAME="unit_test"
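# Illustrative prod-prefixed example (the name itself is hypothetical):
# DISPLAY_NAME="prod_broker"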
# True if all data maps are in Badger, false if still using SQL
DATA_MAPS_IN_BADGER="true"
# Enables Lambda to do PoW
ENABLE_LAMBDA="false"
LAMBDA_ENV="dev"
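# Illustrative toggle (not in the original file): uncomment to have this broker
# do PoW via Lambda.
# ENABLE_LAMBDA="true"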
# AWS Credentials
AWS_ACCESS_KEY_ID=""
AWS_SECRET_ACCESS_KEY=""
AWS_REGION=us-east-2
AWS_BUCKET_NAME="unknown"