
Commit be7835d
Committed Apr 30, 2019
solve save model bug and add gcn
1 parent 391a39a commit be7835d

File tree: 13 files changed, +754 −3 lines
 

Diff for: .gitignore (+117 −1)

The single removed line was a leading blank; the new file reads:

```
data
.idea
tmp

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/
```

Diff for: lesson28-GCN/README.MD (+19, new file)

# Graph Convolution Network for TF2

GCN implementation for the paper: [Semi-Supervised Classification with Graph Convolutional Networks](https://arxiv.org/pdf/1609.02907.pdf)

# Benchmark

| dataset        | Citeseer | Cora | Pubmed | NELL |
|----------------|----------|------|--------|------|
| GCN (official) | 70.3     | 81.5 | 79.0   | 66.0 |
| This repo.     |          | 81.8 | 78.9   |      |

# HOWTO

```
python train.py
```

# Screenshot

![](res/screen.png)

Diff for: lesson28-GCN/config.py (+16, new file)

```
import argparse

args = argparse.ArgumentParser()
args.add_argument('--dataset', default='cora')
args.add_argument('--model', default='gcn')
# numeric flags carry an explicit type; without type=, values passed on the
# command line would arrive as strings (only the defaults keep their Python types)
args.add_argument('--learning_rate', type=float, default=0.01)
args.add_argument('--epochs', type=int, default=200)
args.add_argument('--hidden1', type=int, default=16)
args.add_argument('--dropout', type=float, default=0.5)
args.add_argument('--weight_decay', type=float, default=5e-4)
args.add_argument('--early_stopping', type=int, default=10)
args.add_argument('--max_degree', type=int, default=3)


args = args.parse_args()
print(args)
```
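
Defaults can then be overridden at the command line (a hypothetical invocation; `train.py` picks these flags up via `from config import args`):

```
python train.py --dataset cora --learning_rate 0.01 --epochs 200
```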

Diff for: lesson28-GCN/inits.py (+27, new file)

```
import tensorflow as tf
import numpy as np


def uniform(shape, scale=0.05, name=None):
    """Uniform init."""
    initial = tf.random.uniform(shape, minval=-scale, maxval=scale, dtype=tf.float32)
    return tf.Variable(initial, name=name)


def glorot(shape, name=None):
    """Glorot & Bengio (AISTATS 2010) init: uniform on +-sqrt(6/(fan_in+fan_out))."""
    init_range = np.sqrt(6.0/(shape[0]+shape[1]))
    initial = tf.random.uniform(shape, minval=-init_range, maxval=init_range, dtype=tf.float32)
    return tf.Variable(initial, name=name)


def zeros(shape, name=None):
    """All zeros."""
    initial = tf.zeros(shape, dtype=tf.float32)
    return tf.Variable(initial, name=name)


def ones(shape, name=None):
    """All ones."""
    initial = tf.ones(shape, dtype=tf.float32)
    return tf.Variable(initial, name=name)
```
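
For the first GCN layer this gives a fairly tight range. A quick check (a standalone sketch, not part of the commit):

```
import numpy as np

# glorot() bound for a [1433, 16] weight matrix (cora input dim x hidden1)
print(np.sqrt(6.0 / (1433 + 16)))  # ~0.0644, so weights start in (-0.0644, 0.0644)
```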

Diff for: lesson28-GCN/layers.py (+154, new file)

```
from inits import *
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from config import args


# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}


def get_layer_uid(layer_name=''):
    """Helper function, assigns unique layer IDs."""
    if layer_name not in _LAYER_UIDS:
        _LAYER_UIDS[layer_name] = 1
        return 1
    else:
        _LAYER_UIDS[layer_name] += 1
        return _LAYER_UIDS[layer_name]


def sparse_dropout(x, rate, noise_shape):
    """Dropout for sparse tensors.

    floor(keep_prob + uniform[0, 1)) is 1 with probability keep_prob = 1 - rate,
    so each nonzero entry survives with probability 1 - rate; survivors are
    rescaled by 1/(1 - rate) (inverted dropout), preserving the expected value.
    """
    random_tensor = 1 - rate
    random_tensor += tf.random.uniform(noise_shape)
    dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
    pre_out = tf.sparse.retain(x, dropout_mask)
    return pre_out * (1./(1 - rate))


def dot(x, y, sparse=False):
    """Wrapper for tf.matmul (sparse vs dense)."""
    if sparse:
        res = tf.sparse.sparse_dense_matmul(x, y)
    else:
        res = tf.matmul(x, y)
    return res


class Dense(layers.Layer):
    """Dense layer.

    Note: a leftover TF1-style port (placeholder dict) that is only referenced
    by the unused MLP model in models.py; the TF1-only pieces (variable_scope,
    logging) are cleaned up here so it at least constructs under TF2.
    """
    def __init__(self, input_dim, output_dim, placeholders, dropout=0., sparse_inputs=False,
                 act=tf.nn.relu, bias=False, featureless=False, **kwargs):
        super(Dense, self).__init__(**kwargs)

        if dropout:
            self.dropout = placeholders['dropout']
        else:
            self.dropout = 0.

        self.act = act
        self.sparse_inputs = sparse_inputs
        self.featureless = featureless
        self.use_bias = bias

        # helper variable for sparse dropout
        self.num_features_nonzero = placeholders['num_features_nonzero']

        self.vars = {}
        self.vars['weights'] = glorot([input_dim, output_dim], name='weights')
        if self.use_bias:
            self.vars['bias'] = zeros([output_dim], name='bias')

    def _call(self, inputs):
        x = inputs

        # dropout: self.dropout is a drop *rate*, as tf.nn.dropout expects in TF2
        # (the original passed 1 - self.dropout, a keep_prob left over from TF1)
        if self.sparse_inputs:
            x = sparse_dropout(x, self.dropout, self.num_features_nonzero)
        else:
            x = tf.nn.dropout(x, self.dropout)

        # transform
        output = dot(x, self.vars['weights'], sparse=self.sparse_inputs)

        # bias
        if self.use_bias:
            output += self.vars['bias']

        return self.act(output)


class GraphConvolution(layers.Layer):
    """Graph convolution layer: activation(sum_i support_i @ (x @ W_i))."""
    def __init__(self, input_dim, output_dim, num_features_nonzero,
                 dropout=0.,
                 is_sparse_inputs=False,
                 activation=tf.nn.relu,
                 bias=False,
                 featureless=False, **kwargs):
        super(GraphConvolution, self).__init__(**kwargs)

        self.dropout = dropout
        self.activation = activation
        self.is_sparse_inputs = is_sparse_inputs
        self.featureless = featureless
        self.use_bias = bias
        self.num_features_nonzero = num_features_nonzero

        # one weight matrix per support, hard-coded to a single support here,
        # which matches the plain 'gcn' model (gcn_cheby would need
        # 1 + max_degree matrices); add_weight replaces the deprecated add_variable
        self.weights_ = []
        for i in range(1):
            w = self.add_weight('weight' + str(i), [input_dim, output_dim])
            self.weights_.append(w)
        if self.use_bias:
            self.bias = self.add_weight('bias', [output_dim])

        # for p in self.trainable_variables:
        #     print(p.name, p.shape)

    def call(self, inputs, training=None):
        x, support_ = inputs

        # dropout is applied unless training=False is passed explicitly
        if training is not False and self.is_sparse_inputs:
            x = sparse_dropout(x, self.dropout, self.num_features_nonzero)
        elif training is not False:
            x = tf.nn.dropout(x, self.dropout)

        # convolve
        supports = list()
        for i in range(len(support_)):
            if not self.featureless:  # if it has features x
                pre_sup = dot(x, self.weights_[i], sparse=self.is_sparse_inputs)
            else:
                pre_sup = self.weights_[i]

            support = dot(support_[i], pre_sup, sparse=True)
            supports.append(support)

        output = tf.add_n(supports)

        # bias
        if self.use_bias:
            output += self.bias

        return self.activation(output)
```
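
A quick empirical check of the masking trick used by `sparse_dropout` (a standalone sketch, not part of the commit):

```
import tensorflow as tf

# inverted dropout on a sparse tensor should keep the expected sum unchanged
x = tf.sparse.from_dense(tf.ones([1000, 1]))
rate = 0.5
mask = tf.cast(tf.floor((1 - rate) + tf.random.uniform([1000])), tf.bool)
dropped = tf.sparse.retain(x, mask) * (1. / (1 - rate))
print(float(tf.sparse.reduce_sum(x)), float(tf.sparse.reduce_sum(dropped)))
# prints 1000.0 and roughly 1000, up to sampling noise
```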

Diff for: lesson28-GCN/metrics.py (+24, new file)

```
import tensorflow as tf


def masked_softmax_cross_entropy(preds, labels, mask):
    """Softmax cross-entropy loss with masking."""
    loss = tf.nn.softmax_cross_entropy_with_logits(logits=preds, labels=labels)
    mask = tf.cast(mask, dtype=tf.float32)
    # rescale the 0/1 mask by its mean so the reduce_mean below equals the
    # average loss over the masked (labeled) nodes only
    mask /= tf.reduce_mean(mask)
    loss *= mask
    return tf.reduce_mean(loss)


def masked_accuracy(preds, labels, mask):
    """Accuracy with masking."""
    correct_prediction = tf.equal(tf.argmax(preds, 1), tf.argmax(labels, 1))
    accuracy_all = tf.cast(correct_prediction, tf.float32)
    mask = tf.cast(mask, dtype=tf.float32)
    mask /= tf.reduce_mean(mask)
    accuracy_all *= mask
    return tf.reduce_mean(accuracy_all)
```
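
A toy check of that mask normalization (a sketch, not part of the commit):

```
import tensorflow as tf

loss = tf.constant([4., 2., 8., 6.])       # per-node losses
mask = tf.constant([1., 1., 0., 0.])       # only the first two nodes are labeled
mask /= tf.reduce_mean(mask)               # -> [2., 2., 0., 0.]
print(float(tf.reduce_mean(loss * mask)))  # 3.0, the mean of [4., 2.]
```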

Diff for: lesson28-GCN/models.py (+123, new file)

```
import tensorflow as tf
from tensorflow import keras
from layers import *
from metrics import *
from config import args


class MLP(keras.Model):
    """Two-layer perceptron baseline.

    Note: an unported TF1 leftover (placeholder dict, tf.train.AdamOptimizer,
    self.build()); train.py selects it for --model dense but then
    unconditionally constructs GCN, so this class is never instantiated.
    Kept as-is for reference.
    """
    def __init__(self, placeholders, input_dim, **kwargs):
        super(MLP, self).__init__(**kwargs)

        self.inputs = placeholders['features']
        self.input_dim = input_dim
        # self.input_dim = self.inputs.get_shape().as_list()[1]  # to be supported in future TensorFlow versions
        self.output_dim = placeholders['labels'].get_shape().as_list()[1]
        self.placeholders = placeholders

        self.optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate)

        self.build()

    def _loss(self):
        # weight decay loss
        for var in self.layers[0].vars.values():
            self.loss += args.weight_decay * tf.nn.l2_loss(var)

        # cross-entropy error
        self.loss += masked_softmax_cross_entropy(self.outputs, self.placeholders['labels'],
                                                  self.placeholders['labels_mask'])

    def _accuracy(self):
        self.accuracy = masked_accuracy(self.outputs, self.placeholders['labels'],
                                        self.placeholders['labels_mask'])

    def _build(self):
        self.layers.append(Dense(input_dim=self.input_dim,
                                 output_dim=args.hidden1,
                                 placeholders=self.placeholders,
                                 act=tf.nn.relu,
                                 dropout=True,
                                 sparse_inputs=True,
                                 logging=self.logging))

        self.layers.append(Dense(input_dim=args.hidden1,
                                 output_dim=self.output_dim,
                                 placeholders=self.placeholders,
                                 act=lambda x: x,
                                 dropout=True,
                                 logging=self.logging))

    def predict(self):
        return tf.nn.softmax(self.outputs)


class GCN(keras.Model):

    def __init__(self, input_dim, output_dim, num_features_nonzero, **kwargs):
        super(GCN, self).__init__(**kwargs)

        self.input_dim = input_dim    # 1433 for cora
        self.output_dim = output_dim  # 7 for cora

        print('input dim:', input_dim)
        print('output dim:', output_dim)
        print('num_features_nonzero:', num_features_nonzero)

        self.layers_ = []
        self.layers_.append(GraphConvolution(input_dim=self.input_dim,  # 1433
                                             output_dim=args.hidden1,   # 16
                                             num_features_nonzero=num_features_nonzero,
                                             activation=tf.nn.relu,
                                             dropout=args.dropout,
                                             is_sparse_inputs=True))

        self.layers_.append(GraphConvolution(input_dim=args.hidden1,      # 16
                                             output_dim=self.output_dim,  # 7
                                             num_features_nonzero=num_features_nonzero,
                                             activation=lambda x: x,
                                             dropout=args.dropout))

        for p in self.trainable_variables:
            print(p.name, p.shape)

    def call(self, inputs, training=None):
        """
        Forward pass; returns the masked loss and accuracy directly.

        :param inputs: tuple (x, label, mask, support)
        :param training: dropout is applied unless this is False
        :return: (loss, acc)
        """
        x, label, mask, support = inputs

        outputs = [x]

        # self.layers_ is the explicit layer list (Keras tracks it, so
        # iterating self.layers would work too)
        for layer in self.layers_:
            hidden = layer((outputs[-1], support), training=training)
            outputs.append(hidden)
        output = outputs[-1]

        # weight decay loss on the first layer's weights only
        loss = tf.zeros([])
        for var in self.layers_[0].trainable_variables:
            loss += args.weight_decay * tf.nn.l2_loss(var)

        # cross-entropy error
        loss += masked_softmax_cross_entropy(output, label, mask)

        acc = masked_accuracy(output, label, mask)

        return loss, acc

    def predict(self):
        # TF1 leftover: self.outputs is never set by call(), and train.py
        # does not use predict()
        return tf.nn.softmax(self.outputs)
```
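
End to end, the two stacked layers implement the forward model from the paper, Z = A_norm · ReLU(A_norm X W0) · W1 with A_norm = D^-0.5 (A + I) D^-0.5 (the softmax is folded into the loss). A dense numpy sketch of that propagation (illustrative only; the graph and shapes are made up):

```
import numpy as np

A = np.array([[0., 1.], [1., 0.]], dtype=np.float32)  # toy 2-node graph
A_hat = A + np.eye(2, dtype=np.float32)               # add self-loops
d = A_hat.sum(1)
A_norm = A_hat / np.sqrt(np.outer(d, d))              # D^-0.5 (A + I) D^-0.5

X = np.random.randn(2, 4).astype(np.float32)          # node features
W0 = np.random.randn(4, 3).astype(np.float32)         # hidden weights
W1 = np.random.randn(3, 2).astype(np.float32)         # output weights

H = np.maximum(A_norm @ X @ W0, 0.)                   # ReLU(A_norm X W0)
Z = A_norm @ H @ W1                                   # per-node logits
print(Z.shape)                                        # (2, 2)
```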

Diff for: lesson28-GCN/nohup.out

Whitespace-only changes.

Diff for: lesson28-GCN/res/screen.png

Binary image (168 KB), not shown.

Diff for: lesson28-GCN/train.py (+97, new file)

```
import time
import tensorflow as tf
from tensorflow.keras import optimizers

from utils import *
from models import GCN, MLP
from config import args

import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
print('tf version:', tf.__version__)
assert tf.__version__.startswith('2.')


# set random seed
seed = 123
np.random.seed(seed)
tf.random.set_seed(seed)


# load data
adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(args.dataset)
print('adj:', adj.shape)
print('features:', features.shape)
print('y:', y_train.shape, y_val.shape, y_test.shape)
print('mask:', train_mask.shape, val_mask.shape, test_mask.shape)


# D^-1 @ X: row-normalize features, returned in tuple representation
features = preprocess_features(features)  # [49216, 2], [49216], [2708, 1433]
print('features coordinates:', features[0].shape)
print('features data:', features[1].shape)
print('features shape:', features[2])

if args.model == 'gcn':
    # D^-0.5 (A + I) D^-0.5
    support = [preprocess_adj(adj)]
    num_supports = 1
    model_func = GCN
elif args.model == 'gcn_cheby':
    support = chebyshev_polynomials(adj, args.max_degree)
    num_supports = 1 + args.max_degree
    model_func = GCN
elif args.model == 'dense':
    support = [preprocess_adj(adj)]  # not used
    num_supports = 1
    model_func = MLP
else:
    raise ValueError('Invalid argument for model: ' + str(args.model))


# create model
# note: GCN is constructed unconditionally; model_func selected above is never used
model = GCN(input_dim=features[2][1], output_dim=y_train.shape[1],
            num_features_nonzero=features[1].shape)  # input_dim: 1433 for cora


train_label = tf.convert_to_tensor(y_train)
train_mask = tf.convert_to_tensor(train_mask)
val_label = tf.convert_to_tensor(y_val)
val_mask = tf.convert_to_tensor(val_mask)
test_label = tf.convert_to_tensor(y_test)
test_mask = tf.convert_to_tensor(test_mask)
features = tf.SparseTensor(*features)
support = [tf.cast(tf.SparseTensor(*support[0]), dtype=tf.float32)]
num_features_nonzero = features.values.shape
dropout = args.dropout


# use the configured learning rate (the original hard-coded lr=1e-2, the same value)
optimizer = optimizers.Adam(lr=args.learning_rate)


for epoch in range(args.epochs):

    with tf.GradientTape() as tape:
        loss, acc = model((features, train_label, train_mask, support))
    grads = tape.gradient(loss, model.trainable_variables)
    optimizer.apply_gradients(zip(grads, model.trainable_variables))

    # training=False disables dropout for evaluation
    _, val_acc = model((features, val_label, val_mask, support), training=False)

    if epoch % 20 == 0:
        print(epoch, float(loss), float(acc), '\tval:', float(val_acc))


test_loss, test_acc = model((features, test_label, test_mask, support), training=False)

print('\ttest:', float(test_loss), float(test_acc))
```
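
Since `GCN` is a subclassed `keras.Model`, the trained weights could be checkpointed after the loop with `save_weights` (a hypothetical addition, not part of this commit):

```
# hypothetical: checkpoint the trained weights after training
model.save_weights('gcn_ckpt')
# later, construct a GCN with the same dims and call:
# model.load_weights('gcn_ckpt')
```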

Diff for: lesson28-GCN/utils.py (+171, new file)

```
import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg import eigsh  # public import path for arpack's eigsh
import sys


def parse_index_file(filename):
    """Parse index file."""
    index = []
    for line in open(filename):
        index.append(int(line.strip()))
    return index


def sample_mask(idx, l):
    """Create mask."""
    mask = np.zeros(l)
    mask[idx] = 1
    return np.array(mask, dtype=bool)


def load_data(dataset_str):
    """
    Loads input data from gcn/data directory

    ind.dataset_str.x => the feature vectors of the training instances as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.tx => the feature vectors of the test instances as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.allx => the feature vectors of both labeled and unlabeled training instances
        (a superset of ind.dataset_str.x) as scipy.sparse.csr.csr_matrix object;
    ind.dataset_str.y => the one-hot labels of the labeled training instances as numpy.ndarray object;
    ind.dataset_str.ty => the one-hot labels of the test instances as numpy.ndarray object;
    ind.dataset_str.ally => the labels for instances in ind.dataset_str.allx as numpy.ndarray object;
    ind.dataset_str.graph => a dict in the format {index: [index_of_neighbor_nodes]} as collections.defaultdict object;
    ind.dataset_str.test.index => the indices of test instances in graph, for the inductive setting as list object.

    All objects above must be saved using python pickle module.

    :param dataset_str: Dataset name
    :return: All data input files loaded (as well the training/test data).
    """
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for i in range(len(names)):
        with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))

    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
    test_idx_range = np.sort(test_idx_reorder)

    if dataset_str == 'citeseer':
        # Fix citeseer dataset (there are some isolated nodes in the graph)
        # Find isolated nodes, add them as zero-vecs into the right position
        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range-min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range-min(test_idx_range), :] = ty
        ty = ty_extended

    features = sp.vstack((allx, tx)).tolil()
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))

    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]

    idx_test = test_idx_range.tolist()
    idx_train = range(len(y))
    idx_val = range(len(y), len(y)+500)

    train_mask = sample_mask(idx_train, labels.shape[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])

    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]

    return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask


def sparse_to_tuple(sparse_mx):
    """Convert sparse matrix to tuple representation."""
    def to_tuple(mx):
        if not sp.isspmatrix_coo(mx):
            mx = mx.tocoo()
        coords = np.vstack((mx.row, mx.col)).transpose()
        values = mx.data
        shape = mx.shape
        return coords, values, shape

    if isinstance(sparse_mx, list):
        for i in range(len(sparse_mx)):
            sparse_mx[i] = to_tuple(sparse_mx[i])
    else:
        sparse_mx = to_tuple(sparse_mx)

    return sparse_mx


def preprocess_features(features):
    """Row-normalize feature matrix and convert to tuple representation."""
    rowsum = np.array(features.sum(1))      # sum of each row, [2708, 1]
    r_inv = np.power(rowsum, -1).flatten()  # 1/rowsum, [2708]
    r_inv[np.isinf(r_inv)] = 0.             # zero out infs from empty rows
    r_mat_inv = sp.diags(r_inv)             # sparse diagonal matrix, [2708, 2708]
    features = r_mat_inv.dot(features)      # D^-1 [2708, 2708] @ X [2708, 1433]
    return sparse_to_tuple(features)        # (coordinates, data, shape)


def normalize_adj(adj):
    """Symmetrically normalize adjacency matrix."""
    adj = sp.coo_matrix(adj)
    rowsum = np.array(adj.sum(1))                  # D
    d_inv_sqrt = np.power(rowsum, -0.5).flatten()  # D^-0.5
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
    d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
    return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()  # D^-0.5 A D^-0.5


def preprocess_adj(adj):
    """Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
    adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
    return sparse_to_tuple(adj_normalized)


def chebyshev_polynomials(adj, k):
    """Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
    print("Calculating Chebyshev polynomials up to order {}...".format(k))

    adj_normalized = normalize_adj(adj)
    laplacian = sp.eye(adj.shape[0]) - adj_normalized
    largest_eigval, _ = eigsh(laplacian, 1, which='LM')
    scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])

    t_k = list()
    t_k.append(sp.eye(adj.shape[0]))
    t_k.append(scaled_laplacian)

    # Chebyshev recurrence: T_k(x) = 2x T_{k-1}(x) - T_{k-2}(x)
    def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
        s_lap = sp.csr_matrix(scaled_lap, copy=True)
        return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two

    for i in range(2, k+1):
        t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))

    return sparse_to_tuple(t_k)
```
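
The tuple representation returned throughout this file is exactly what train.py unpacks into a `tf.SparseTensor`. A minimal round trip (a sketch; the 2×2 matrix is made up):

```
import scipy.sparse as sp
import tensorflow as tf
from utils import sparse_to_tuple  # assuming utils.py is on the path

m = sp.coo_matrix(([1., 2.], ([0, 1], [1, 0])), shape=(2, 2))
coords, values, shape = sparse_to_tuple(m)
st = tf.SparseTensor(coords, values, shape)  # same as tf.SparseTensor(*features) in train.py
print(tf.sparse.to_dense(tf.sparse.reorder(st)))
```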
One remaining changed file is binary and not shown.

Diff for: 深度学习与TensorFlow入门实战-源码和PPT/lesson31-Keras模型保存与加载/save_load_model.py (+6 −2)

The fix loads the saved model with `compile=False` and recompiles explicitly, so Keras does not try to restore the compiled optimizer/loss state from the HDF5 file (the "save model bug" in the commit message):

```
@@ -52,8 +52,12 @@ def preprocess(x, y):
 print('saved total model.')
 del network
 
-print('load model from file')
-network = tf.keras.models.load_model('model.h5')
+print('loaded model from file.')
+network = tf.keras.models.load_model('model.h5', compile=False)
+network.compile(optimizer=optimizers.Adam(lr=0.01),
+                loss=tf.losses.CategoricalCrossentropy(from_logits=True),
+                metrics=['accuracy']
+                )
 x_val = tf.cast(x_val, dtype=tf.float32) / 255.
 x_val = tf.reshape(x_val, [-1, 28*28])
 y_val = tf.cast(y_val, dtype=tf.int32)
```
