Cause of the problem
An error occurred while trying to save the model through a callback function (ModelCheckpoint).
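The error comes from Keras trying to serialize a custom layer that cannot describe its own constructor arguments. A minimal sketch of the situation (the `AttentionLayer` name and the shapes here are hypothetical, not from the original code):

```python
import tensorflow as tf

# Hypothetical custom layer: takes an __init__ argument but does not
# override get_config(), so Keras cannot serialize its architecture.
class AttentionLayer(tf.keras.layers.Layer):
    def __init__(self, attention_dim, **kwargs):
        super().__init__(**kwargs)
        self.attention_dim = attention_dim
        self.dense = tf.keras.layers.Dense(attention_dim)

    def call(self, inputs):
        return self.dense(inputs)

inputs = tf.keras.Input(shape=(10,))
model = tf.keras.Model(inputs, AttentionLayer(attention_dim=100)(inputs))

# Saving the full model to HDF5 (which ModelCheckpoint also does when its
# filepath ends in .h5) typically raises, in TF 2.x:
# NotImplementedError: Layer AttentionLayer has arguments in `__init__`
# and therefore must override `get_config`.
model.save("model.h5")
```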
```python
import os
from pathlib import Path

from sklearn.model_selection import train_test_split
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.utils import to_categorical

def training(self, fold_var, dataset, epochs, batch_size, train_index, test_index,
             X_data, Y_data, x_data, y_data):
    self.epochs = epochs
    self.batch_size = batch_size
    self.dataset = dataset
    self.X_data = X_data
    self.Y_data = Y_data
    self.x_data = x_data
    self.y_data = y_data

    ### Cross Validation (CV)
    # Stop training when val_loss hasn't improved for 100 epochs.
    es = EarlyStopping(monitor='val_loss', mode='min', verbose=1, patience=100)
    self.fold_var = fold_var

    # Create the per-dataset model directory; exist_ok makes this idempotent,
    # so the extra os.path.isdir/os.mkdir check from before is unnecessary.
    save_folder = ("./models/" + self.dataset + "_models(s=" + str(self.MAX_SENTENCES)
                   + "w=" + str(self.MAX_SENTENCE_LENGTH) + ")")
    Path(save_folder).mkdir(parents=True, exist_ok=True)
    self.model_path = os.path.join(save_folder, "model.h5")

    # Split the current fold into train+val and test.
    train_val_X, test_X = self.X_data[train_index], self.X_data[test_index]
    train_val_Y, test_Y = self.y_data[train_index], self.y_data[test_index]
    self.nb_classes = len(set(train_val_Y))
    train_val_Y = to_categorical(train_val_Y, self.nb_classes)
    self.train_X, self.val_X, self.train_Y, self.val_Y = train_test_split(
        train_val_X, train_val_Y, test_size=0.1111, random_state=42)

    # self.embedding_matrix = self.load_embedding('word2vec')  # optional
    self.model, self.attention_extractor = self.HAN_layer(
        attention_dim=100,
        rnn_dim=50,
        include_dense_batch_normalization=False,
        include_dense_dropout=True,
        nb_dense=1,
        dense_dim=300,
        dense_dropout=0.2)

    # Save the best model of this fold. os.path.join guards against a missing
    # '/' (assuming get_model_name returns a bare file name like "model_1.h5").
    checkpointer = ModelCheckpoint(
        filepath=os.path.join(save_folder, self.get_model_name(self.fold_var)),
        monitor='val_loss',
        verbose=True,
        save_best_only=True,
        mode='min')

    # ======== where the callbacks are used ============
    self.history = self.model.fit(x=[self.train_X],
                                  y=[self.train_Y],
                                  batch_size=self.batch_size,
                                  epochs=self.epochs,
                                  verbose=True,
                                  validation_data=(self.val_X, self.val_Y),
                                  callbacks=[es, checkpointer])

    # Restore the best checkpoint of this fold and evaluate on the held-out split.
    self.model.load_weights(save_folder + "/model_" + str(self.fold_var) + ".h5")
    length = len(test_Y)
    y_true = test_Y
    y_pred = []
    y_predict = self.model.predict(test_X)
```
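For reference, the crash happens because ModelCheckpoint above saves the whole model (architecture plus weights) to .h5, and serializing the architecture requires every custom layer to expose get_config. Since the code only calls load_weights afterwards, a weights-only checkpoint would also sidestep the error; a sketch, reusing the variables from the method above:

```python
# Sketch of a weights-only alternative: no architecture serialization,
# so custom layers without get_config() don't break the save.
checkpointer = ModelCheckpoint(
    filepath=os.path.join(save_folder, self.get_model_name(self.fold_var)),
    monitor='val_loss',
    save_best_only=True,
    save_weights_only=True,  # store weights only; pairs with load_weights()
    mode='min')
```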
Solution
Add a get_config(self) method and register in it the parameters that need to be restored.
get_config's role is to define the layer's configuration so the layer can be re-created (reused) when the saved model is loaded.
```python
from tensorflow import keras

class Hierarchical_attention_networks():
    def __init__(self, lr, op, MAX_SENTENCES, MAX_SENTENCE_LENGTH, embedding_dim,
                 max_nb_words, tokenizer, number_of_class):
        self.MAX_SENTENCES = MAX_SENTENCES
        self.MAX_SENTENCE_LENGTH = MAX_SENTENCE_LENGTH
        self.tokenizer = tokenizer
        self.embedding_dim = embedding_dim
        self.max_nb_words = max_nb_words
        self.embedding_matrix = self.load_embedding('word2vec')
        self.nb_classes = number_of_class
        self.lr = lr
        if op == 'Adagrad':
            self.optimizer = keras.optimizers.Adagrad(lr=self.lr, epsilon=1e-6)
        elif op == 'Adadelta':
            self.optimizer = keras.optimizers.Adadelta(lr=self.lr, epsilon=1e-6)
        elif op == 'Adam':
            self.optimizer = keras.optimizers.Adam(lr=self.lr)
        elif op == 'RMSprop':
            # Bug fix: this branch instantiated Adadelta; it should be RMSprop.
            self.optimizer = keras.optimizers.RMSprop(lr=self.lr, rho=0.9, epsilon=1e-6)

    # ======== this is the part! ============
    def get_config(self):
        # Note: super().get_config() only exists if this class (or the custom
        # layer this method really belongs on) subclasses a Keras Layer/Model.
        config = super().get_config()
        config.update({
            'MAX_SENTENCES': self.MAX_SENTENCES,
            'MAX_SENTENCE_LENGTH': self.MAX_SENTENCE_LENGTH,
            'embedding_dim': self.embedding_dim,
            'max_nb_words': self.max_nb_words,
            # Caution: non-primitive objects such as a tokenizer may not
            # serialize cleanly into an h5/JSON config.
            'tokenizer': self.tokenizer,
        })
        return config
```
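With get_config in place on the custom layer, the checkpoint can be written and read back. A minimal, self-contained round trip (layer name and dimensions are hypothetical, not from the original code):

```python
import tensorflow as tf

class AttentionLayer(tf.keras.layers.Layer):
    def __init__(self, attention_dim, **kwargs):
        super().__init__(**kwargs)
        self.attention_dim = attention_dim
        self.dense = tf.keras.layers.Dense(attention_dim)

    def call(self, inputs):
        return self.dense(inputs)

    def get_config(self):
        # Register every __init__ argument so Keras can rebuild the layer.
        config = super().get_config()
        config.update({'attention_dim': self.attention_dim})
        return config

inputs = tf.keras.Input(shape=(10,))
model = tf.keras.Model(inputs, AttentionLayer(attention_dim=100)(inputs))
model.save("model_1.h5")  # now succeeds

# Custom classes still have to be named when loading the saved model.
restored = tf.keras.models.load_model(
    "model_1.h5", custom_objects={'AttentionLayer': AttentionLayer})
```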