generate_contracted_data_ClusterGCN.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 25 18:55:32 2023
@author: chris
"""
import numpy as np
from networkx.readwrite import json_graph
import contraction
import utility
import datasets
import networkx as nx
import json
import argparse
import os
parser = argparse.ArgumentParser()
parser.add_argument('--dataset_name', nargs='?', const='PPI', type=str, default='PPI')
parser.add_argument('--centrality', nargs='?', const='EC', type=str, default='EC')
parser.add_argument('--constraint', nargs='?', const='FL', type=str, default='FL')
parser.add_argument('--N_cluster', nargs='?', const=100, type=int, default=100)
parser.add_argument('--gamma', nargs='?', const=0.52, type=float, default=0.52)
parser.add_argument('--node_budget', nargs='?', const=15000, type=int, default=15000)
print("Generating Cluster-GCN Dataset for:")
args = parser.parse_args()
print(args)
dataset_name = args.dataset_name
centrality = args.centrality
constraint = args.constraint
N_cluster = args.N_cluster
gamma = args.gamma
steps = [args.node_budget]
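# a single contraction step, down to the requested node budget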
if dataset_name == 'PPI':
    multilabel = True
    # get train dataset
    (x, y, edge_index, train_mask, val_mask, test_mask) = datasets.get_PPI(split='train')
    num_features = x.shape[1]
    num_classes = y.shape[1]
    # get initial label distribution
    Y_dist_before = utility.get_label_distribution_tensor(y[train_mask], multilabel)
    # construct train graph
    G_train = utility.construct_graph(x, y, edge_index, train_mask, val_mask, test_mask)
    # get val dataset
    (x, y, edge_index, train_mask, val_mask, test_mask) = datasets.get_PPI(split='val')
    # construct val graph
    G_val = utility.construct_graph(x, y, edge_index, train_mask, val_mask, test_mask)
    # get test dataset
    (x, y, edge_index, train_mask, val_mask, test_mask) = datasets.get_PPI(split='test')
    # construct test graph
    G_test = utility.construct_graph(x, y, edge_index, train_mask, val_mask, test_mask)
    # contract only the training graph; the val/test graphs stay untouched
    G_train = contraction.contract_graph(G_train, centrality=centrality,
                                         constraint=constraint,
                                         num_features=num_features,
                                         num_classes=num_classes,
                                         N_cluster=N_cluster, gamma=gamma,
                                         steps=steps, multilabel=multilabel)
    # combine train, val, and test into a single graph with disjoint node ids
    G_val = nx.convert_node_labels_to_integers(G_val, first_label=G_train.number_of_nodes(), ordering='default')
    G_test = nx.convert_node_labels_to_integers(G_test, first_label=G_train.number_of_nodes() + G_val.number_of_nodes(), ordering='default')
    G = nx.compose(G_train, G_val)
    G = nx.compose(G, G_test)
else:
    multilabel = False
    if dataset_name == 'OrganC':
        (x, y, edge_index, train_mask, val_mask, test_mask) = datasets.get_Organ(view='C')
    elif dataset_name == 'OrganS':
        (x, y, edge_index, train_mask, val_mask, test_mask) = datasets.get_Organ(view='S')
    elif dataset_name == 'Flickr':
        (x, y, edge_index, train_mask, val_mask, test_mask) = datasets.get_Flickr()
    num_features = x.shape[1]
    num_classes = max(y) + 1
    # construct networkx graph
    G = utility.construct_graph(x, y, edge_index, train_mask, val_mask,
                                test_mask)
    # contract graph
    G = contraction.contract_graph(G, centrality=centrality,
                                   constraint=constraint,
                                   num_features=num_features,
                                   num_classes=num_classes,
                                   N_cluster=N_cluster, gamma=gamma,
                                   steps=steps, multilabel=multilabel)
# Reorder the node ids so they run train -> val -> test
reorder_map = {}
curr_id = 0
for node in G.nodes(data=True):
    if node[1]['train']:
        reorder_map[node[0]] = curr_id
        curr_id += 1
for node in G.nodes(data=True):
    if node[1]['val']:
        reorder_map[node[0]] = curr_id
        curr_id += 1
for node in G.nodes(data=True):
    if node[1]['test']:
        reorder_map[node[0]] = curr_id
        curr_id += 1
G = nx.relabel_nodes(G, reorder_map)
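# The relabeling gives train, val, and test nodes contiguous id ranges in the
# exported data (presumably what the Cluster-GCN loader expects; not verified).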
# Generate the Cluster-GCN data format (features, class map, id map, graph)
x = np.empty((G.number_of_nodes(), num_features))
y = {}
new_id_map = {}
new_G = nx.Graph()
for node in G.nodes(data=True):
    x[node[0], :] = node[1]['x']
    y[f"{node[0]}"] = node[1]['y'].astype(int).tolist()
    new_id_map[f"{node[0]}"] = node[0]
    new_G.add_node(node[0])
    new_G.nodes[node[0]]['val'] = bool(node[1]['val'])
    new_G.nodes[node[0]]['test'] = bool(node[1]['test'])
new_G.add_edges_from(G.edges())
path = "OtherBenchmarks/Cluster-GCN/cluster_gcn/data/"
suffix = dataset_name + '_' + centrality
# create directory
if not os.path.exists(path + suffix):
os.makedirs(path + suffix)
# saving class map JSON
file = open(path + suffix + '/' + suffix + '-class_map.json','w')
json.dump(y,file)
file.close()
# saving id map JSON
file = open(path + suffix + '/' + suffix + '-id_map.json','w')
json.dump(new_id_map,file)
file.close()
# saving JSON graph file
file = open(path + suffix + '/' + suffix + '-G.json', 'w')
json.dump(json_graph.node_link_data(new_G),file)
file.close()
# saving fature npy file
np.save(path + suffix + '/' + suffix + '-feats.npy',x)
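# Example invocation (the flag values shown are the script defaults; this
# assumes the repo's `contraction`, `utility`, and `datasets` modules are
# importable from the working directory):
#   python generate_contracted_data_ClusterGCN.py --dataset_name PPI \
#       --centrality EC --constraint FL --N_cluster 100 --gamma 0.52 \
#       --node_budget 15000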