Commit fa124f7: Update tl.py

1 parent aca56af


SC2Spa/tl.py

Lines changed: 35 additions & 33 deletions
@@ -16,11 +16,14 @@
 
 from IPython.display import clear_output
 
-import keras.backend as K
+from tensorflow.keras import backend as K
 from . import pp
 
 def rmse(y_true, y_pred):
+    y_true = K.cast(y_true, dtype='float32')  # convert to float32
+    y_pred = K.cast(y_pred, dtype='float32')  # convert to float32
     return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))
+
 def calc_rmse(y_true, y_pred):
     return np.sqrt(np.square(y_true - y_pred).mean())
 
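A minimal sketch of what the new casts guard against (assuming TensorFlow 2.x eager execution; the tensor values are illustrative): when labels arrive as integer tensors, the subtraction inside the metric mixes int32 and float32 operands, which TensorFlow rejects, so casting both inputs first makes the metric dtype-safe.

import tensorflow as tf
from tensorflow.keras import backend as K

def rmse(y_true, y_pred):
    y_true = K.cast(y_true, 'float32')  # guard against integer label tensors
    y_pred = K.cast(y_pred, 'float32')
    return K.sqrt(K.mean(K.square(y_pred - y_true), axis=-1))

y_true = tf.constant([[0, 1]])                  # int32 tensor, as labels often are
y_pred = tf.constant([[0.1, 0.8]], tf.float32)
# Without the casts, y_pred - y_true raises InvalidArgumentError (int32 vs float32).
print(rmse(y_true, y_pred).numpy())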

@@ -154,7 +157,8 @@ def CrossValidation(X: np.array, Y: np.array, train_indices, test_indices,
 
     length = Y.shape[1]
 
-    earlystopper = EarlyStopping(patience=ES_patience, verbose=1)
+    earlystopper = EarlyStopping(monitor='rmse', patience=ES_patience, mode='min', verbose=1)
+
 
     # Set a learning rate annealer
     learning_rate_reduction = ReduceLROnPlateau(monitor='val_' + loss_name,
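For a custom metric such as rmse, a brief sketch of why mode='min' is stated explicitly (the patience value here is illustrative; ES_patience is a parameter of CrossValidation): the default mode='auto' guesses the improvement direction from the monitored name, so spelling out 'min' guarantees the callback treats a decreasing rmse as progress.

from tensorflow.keras.callbacks import EarlyStopping

earlystopper = EarlyStopping(
    monitor='rmse',  # name of the compiled metric to watch
    patience=20,     # epochs without improvement before stopping
    mode='min',      # smaller rmse is better; no reliance on 'auto' inference
    verbose=1,
)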
@@ -184,11 +188,12 @@ def CrossValidation(X: np.array, Y: np.array, train_indices, test_indices,
             shuffle = True
         else:
             shuffle = False
-        history = model.fit_generator(generator = batch_generator(X, Y, cv_train,
-                                      batch_size, shuffle),
-                                      epochs = epoch, steps_per_epoch = len(cv_train) / batch_size,
-                                      validation_data = (X_test, Y[cv_test, :]),
-                                      callbacks = [learning_rate_reduction, earlystopper])
+        history = model.fit(
+            batch_generator(X, Y, np.arange(X.shape[0]), batch_size, shuffle),
+            epochs=epoch,
+            steps_per_epoch=X.shape[0] // batch_size,
+            callbacks=[learning_rate_reduction, earlystopper]
+        )
 
         train_pred = model.predict(X_train)
         test_pred = model.predict(X_test)
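These rewrites move off the fit_generator API, which TF 2.x deprecated and later removed, onto model.fit, which accepts a Python generator directly. A self-contained sketch under stated assumptions (toy_batch_generator, the toy shapes, and the two-epoch run are illustrative stand-ins, not SC2Spa's actual batch_generator):

import numpy as np
import tensorflow as tf

X = np.random.rand(256, 10).astype('float32')
Y = np.random.rand(256, 2).astype('float32')
batch_size = 32

def toy_batch_generator(X, Y, indices, batch_size, shuffle):
    # Infinite generator, as model.fit with steps_per_epoch expects.
    while True:
        if shuffle:
            np.random.shuffle(indices)
        for start in range(0, len(indices) - batch_size + 1, batch_size):
            batch = indices[start:start + batch_size]
            yield X[batch], Y[batch]

model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(10,))])
model.compile(optimizer='adam', loss='mse')

# Old style: model.fit_generator(generator=..., ...) -- removed in recent TF 2 releases.
model.fit(
    toy_batch_generator(X, Y, np.arange(X.shape[0]), batch_size, True),
    epochs=2,
    steps_per_epoch=X.shape[0] // batch_size,
)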
@@ -240,7 +245,7 @@ def Train(X, Y, root='Model_SI/', name = 'SI',
 
     length = Y.shape[1]
 
-    earlystopper = EarlyStopping(monitor=loss_name, patience=ES_patience, verbose=1)
+    earlystopper = EarlyStopping(monitor=loss_name, patience=ES_patience, verbose=1, mode='min')
 
     # Set a learning rate annealer
     learning_rate_reduction = ReduceLROnPlateau(monitor=loss_name,
@@ -257,19 +262,20 @@ def Train(X, Y, root='Model_SI/', name = 'SI',
         shuffle = True
     else:
         shuffle = False
-    model.fit_generator(generator = batch_generator(X,\
-                                    Y,\
-                                    np.arange(X.shape[0]),\
-                                    batch_size, shuffle),\
-                        epochs = epoch,\
-                        steps_per_epoch = X.shape[0] / batch_size,\
-                        callbacks = [learning_rate_reduction, earlystopper])
+
+    model.fit(
+        batch_generator(X, Y, np.arange(X.shape[0]), batch_size, shuffle),
+        epochs=epoch,
+        steps_per_epoch=X.shape[0] // batch_size,
+        callbacks=[learning_rate_reduction, earlystopper]
+    )
 
     if(save):
         if not os.path.exists(root):
             os.makedirs(root)
         model.save(root + name + '.h5')
 
+
     return model
 
 def WassersteinD(adata_ref: anndata.AnnData, adata_query: anndata.AnnData,
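One detail shared by all the rewritten calls: steps_per_epoch switches from / to //. The old float division was tolerated by fit_generator, but model.fit expects an integer step count; flooring simply skips the final partial batch each epoch. A quick illustration with made-up sizes:

n_samples, batch_size = 1000, 32
print(n_samples / batch_size)   # 31.25 -> a float, not a valid step count
print(n_samples // batch_size)  # 31    -> integer steps; 8 leftover samples are skipped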
@@ -1021,7 +1027,7 @@ def CV_LR(X:np.array, Y:np.array, train_indices:list, test_indices:list,
     '''
     length = Y.shape[1]
 
-    earlystopper = EarlyStopping(patience=50, verbose=1)
+    earlystopper = EarlyStopping(patience=50, verbose=1, mode='min')
 
     # Set a learning rate annealer
     learning_rate_reduction = ReduceLROnPlateau(monitor='val_rmse',
@@ -1048,14 +1054,12 @@ def CV_LR(X:np.array, Y:np.array, train_indices:list, test_indices:list,
         model = LR((X.shape[1],), length, l1_reg = l1_reg, l2_reg = l2_reg, dropout = dropout)
         model.compile(optimizer = 'adam', loss = rmse, metrics = [rmse])
 
-        history = model.fit_generator(generator = batch_generator(X,\
-                                      Y,\
-                                      cv_train,\
-                                      batch_size, True),\
-                                      epochs = epoch,\
-                                      steps_per_epoch = len(cv_train) / batch_size,\
-                                      validation_data = (X_test, Y[cv_test, :]),\
-                                      callbacks = [learning_rate_reduction, earlystopper])
+        history = model.fit(
+            batch_generator(X, Y, np.arange(X.shape[0]), batch_size, shuffle),
+            epochs=epoch,
+            steps_per_epoch=X.shape[0] // batch_size,
+            callbacks=[learning_rate_reduction, earlystopper]
+        )
 
         train_pred = model.predict(X_train)
         test_pred = model.predict(X_test)
@@ -1119,7 +1123,7 @@ def Train_transfer(adata, root, model_root, sparse = True, polar = True, CT = 'A
     # Extract values
    X, Y, Y_ref, RTheta_ref = ExtractXY(adata=adata, sparse=sparse, polar=polar)
 
-    earlystopper = EarlyStopping(monitor=loss_name, patience=ES_patience, verbose=1)
+    earlystopper = EarlyStopping(monitor=loss_name, patience=ES_patience, verbose=1, mode='min')
 
     # Set a learning rate annealer
     learning_rate_reduction = ReduceLROnPlateau(monitor=loss_name,
@@ -1133,13 +1137,12 @@ def Train_transfer(adata, root, model_root, sparse = True, polar = True, CT = 'A
     for layer in model.layers[:-NLFT]:
         layer.trainable = False
     model.compile(optimizer = 'adam', loss = loss, metrics = [loss])
-    model.fit_generator(generator = batch_generator(X,\
-                                    Y,\
-                                    np.arange(X.shape[0]),\
-                                    batch_size, True),\
-                        epochs = epoch,\
-                        steps_per_epoch = X.shape[0] / batch_size,\
-                        callbacks = [learning_rate_reduction, earlystopper])
+    model.fit(
+        batch_generator(X, Y, np.arange(X.shape[0]), batch_size, shuffle),
+        epochs=epoch,
+        steps_per_epoch=X.shape[0] // batch_size,
+        callbacks=[learning_rate_reduction, earlystopper]
+    )
     model.save(root + 'SI_' + CT + '.h5')
 
     return model
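The Train_transfer hunk leaves the freeze-then-fine-tune pattern around the new fit call untouched. A minimal sketch of that pattern under stated assumptions (the saved-model path and the NLFT value of 2 are illustrative; NLFT is the number of tail layers left trainable):

import tensorflow as tf

model = tf.keras.models.load_model('Model_SI/SI.h5', compile=False)
NLFT = 2
for layer in model.layers[:-NLFT]:
    layer.trainable = False  # keep the pretrained weights fixed
# Recompile so the trainable changes take effect before fitting.
model.compile(optimizer='adam', loss='mse')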
@@ -1221,4 +1224,3 @@ def CheckAccuracy(name:str, item_name = 'rmse'):
     print('Validation', item_name)
     print(accuracy)
     print(np.mean(accuracy))
-