"""
Optimization for performance comparison with different cotengra settings.
"""
import itertools
import sys
import warnings
import cotengra as ctg
import networkx as nx
import numpy as np
sys.path.insert(0, "../")
import tensorcircuit as tc
try:
import kahypar
except ImportError:
print("kahypar not installed, please install it to run this script.")
exit()
# suppress the warning from cotengra
warnings.filterwarnings(
"ignore",
message="The inputs or output of this tree are not ordered."
"Costs will be accurate but actually contracting requires "
"ordered indices corresponding to array axes.",
)
K = tc.set_backend("jax")
def generate_circuit(param, g, n, nlayers):
# construct the circuit ansatz
c = tc.Circuit(n)
for i in range(n):
c.H(i)
for j in range(nlayers):
c = tc.templates.blocks.QAOA_block(c, g, param[j, 0], param[j, 1])
return c


def trigger_cotengra_optimization(n, nlayers, graph):
    # define the loss function
    def loss_f(params, n, nlayers):
        c = generate_circuit(params, graph, n, nlayers)
        loss = c.expectation_ps(z=[0, 1, 2], reuse=False)
        return K.real(loss)

    params = K.implicit_randn(shape=[nlayers, 2])
    # run only once to trigger the compilation (and hence the contraction-path search)
    K.jit(
        loss_f,
        static_argnums=(1, 2),
    )(params, n, nlayers)
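

# Optional helper (not used by the benchmark loop below): wrap the trigger in a
# wall-clock timer so the compilation/path-search time of each setting can be
# compared directly. This is an illustrative sketch added for convenience, not
# part of the original benchmark.
def timed_trigger_cotengra_optimization(n, nlayers, graph):
    import time

    t0 = time.perf_counter()
    trigger_cotengra_optimization(n, nlayers, graph)
    t1 = time.perf_counter()
    print(f"compilation + contraction-path search took {t1 - t0:.2f} s")
    return t1 - t0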


# define the benchmark parameters
n = 12
nlayers = 12

# define the problem graphs used by the QAOA ansatz
graph_args = {
    "1D lattice": nx.convert_node_labels_to_integers(
        nx.grid_graph((n, 1))
    ),  # 1D lattice
    "2D lattice": nx.convert_node_labels_to_integers(
        nx.grid_graph((n // 5, n // (n // 5)))
    ),  # 2D lattice (2 x 6 for n = 12)
    "all-to-all connected": nx.convert_node_labels_to_integers(
        nx.complete_graph(n)
    ),  # all-to-all connected
}
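

# Optional sanity check (not called by the benchmark): print the size of each
# problem graph. Each QAOA block applies one two-qubit gate per edge, so denser
# graphs yield harder contraction problems.
def print_graph_summary():
    for name, g in graph_args.items():
        print(f"{name}: {g.number_of_nodes()} nodes, {g.number_of_edges()} edges")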


# define the cotengra optimizer parameters
methods_args = [  # https://cotengra.readthedocs.io/en/latest/advanced.html#drivers
    "greedy",
    "kahypar",
    # "labels",
    # "spinglass",  # requires igraph
    # "labelprop",  # requires igraph
    # "betweenness",  # requires igraph
    # "walktrap",  # requires igraph
    # "quickbb",  # requires https://github.com/dechterlab/quickbb
    # "flowcutter",  # requires https://github.com/kit-algo/flow-cutter-pace17
]

optlib_args = [  # https://cotengra.readthedocs.io/en/latest/advanced.html#optimization-library
    "optuna",  # pip install optuna
    # "random",  # default when no library is installed
    # "baytune",  # pip install baytune
    # "nevergrad",  # pip install nevergrad
    # "chocolate",  # pip install git+https://github.com/AIworx-Labs/chocolate@master
    # "skopt",  # pip install scikit-optimize
]

post_processing_args = [  # https://cotengra.readthedocs.io/en/latest/advanced.html#slicing-and-subtree-reconfiguration
    (None, None),
    # ("slicing_opts", {"target_size": 2**28}),
    # ("slicing_reconf_opts", {"target_size": 2**28}),
    ("reconf_opts", {}),
    ("simulated_annealing_opts", {}),
]

minimize_args = [  # https://cotengra.readthedocs.io/en/main/advanced.html#objective
    # "flops",  # minimize the total number of scalar operations
    # "size",  # minimize the size of the largest intermediate tensor
    # "write",  # minimize the total size of all intermediate tensors
    "combo",  # minimize FLOPS + alpha * WRITE, where alpha defaults to 64
]
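
# Note (assumption, not in the original script): recent cotengra versions also
# accept an explicit combo weight as a string such as "combo-32"
# (FLOPS + 32 * WRITE); check the installed cotengra version before relying on it.
# minimize_args.append("combo-32")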


def get_optimizer(method, optlib, post_processing, minimize):
    # assemble a cotengra hyper-optimizer; the optional post-processing step
    # (e.g. reconf_opts) is passed through as an extra keyword argument
    if post_processing[0] is None:
        extra_opts = {}
    else:
        extra_opts = {post_processing[0]: post_processing[1]}
    return ctg.HyperOptimizer(
        methods=method,
        optlib=optlib,
        minimize=minimize,
        parallel=True,
        max_time=60,
        max_repeats=128,
        progbar=True,
        **extra_opts,
    )
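

# Optional sanity check (not used by the benchmark loop below): the same kind of
# hyper-optimizer can be exercised directly on a random tensor network through
# cotengra's own API, without tensorcircuit. This is a minimal sketch that
# assumes ctg.utils.rand_equation and HyperOptimizer.search are available in the
# installed cotengra version.
def path_search_demo(method="greedy", optlib="optuna", minimize="combo"):
    inputs, output, _, size_dict = ctg.utils.rand_equation(n=20, reg=4, seed=42)
    opt = ctg.HyperOptimizer(
        methods=method,
        optlib=optlib,
        minimize=minimize,
        parallel=False,
        max_repeats=32,
        progbar=False,
    )
    tree = opt.search(inputs, output, size_dict)
    # report the estimated cost and the width (log2 of the largest intermediate tensor)
    print("contraction cost:", tree.contraction_cost())
    print("contraction width:", tree.contraction_width())
    return tree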


if __name__ == "__main__":
    for graph, method, optlib, post_processing, minimize in itertools.product(
        graph_args.keys(),
        methods_args,
        optlib_args,
        post_processing_args,
        minimize_args,
    ):
        print(
            f"graph: {graph}, method: {method}, optlib: {optlib}, "
            f"post_processing: {post_processing}, minimize: {minimize}"
        )
        tc.set_contractor(
            "custom",
            optimizer=get_optimizer(method, optlib, post_processing, minimize),
            contraction_info=True,
            preprocessing=True,
            debug_level=2,  # no actual contraction is performed, only the path search
        )
        trigger_cotengra_optimization(n, nlayers, graph_args[graph])
        print("-------------------------")