Keras - Embedding

  • Pretrained word embedding algorithms:
  1. Word2Vec : https://code.google.com/archive/p/word2vec
  2. GloVe : https://nlp.stanford.edu/projects/glove
  • GloVe factorizes a matrix of word co-occurrence statistics.
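
Before the full walkthrough, a minimal sketch of what Keras's Embedding layer actually is: a trainable lookup table mapping integer word indices to dense vectors. The toy sizes below are arbitrary, not from the original listing:

from keras.models import Sequential
from keras.layers import Embedding
import numpy as np

# Vocabulary of 1,000 tokens, each mapped to an 8-dimensional vector;
# the layer's weights form a (1000, 8) lookup table learned like any other.
demo = Sequential()
demo.add(Embedding(1000, 8, input_length=4))
demo.compile('rmsprop', 'mse')

sample = np.array([[4, 20, 7, 3]])  # one sample of four word indices
print(demo.predict(sample).shape)   # (1, 4, 8): one 8-d vector per index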

# Prepare the data
import os, glob

imdb_dir = "data/aclImdb"
train_dir = os.path.join(imdb_dir, "train")

labels = []
texts = []

for label_type in ['neg', 'pos']:
    fnames = glob.glob(os.path.join(train_dir, label_type, "*.txt"))
    for fname in fnames:
        with open(fname, encoding="utf8") as f:
            texts.append(f.read())
        labels.append(0 if label_type == 'neg' else 1)
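
A quick sanity check (not in the original listing) confirms that all 25,000 training reviews were read, half negative and half positive:

print(len(texts), sum(labels))  # 25000 12500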

# Tokenize the data
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
import numpy as np

maxlen = 100                 # cut reviews off after 100 words
training_samples = 200       # train on only 200 samples
validation_samples = 10000   # validate on 10,000 samples
max_words = 10000            # consider only the 10,000 most frequent words in the dataset
embedding_dim = 100
tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)

word_index = tokenizer.word_index  # dict mapping each unique token to its integer index
data = pad_sequences(sequences, maxlen=maxlen)  # pad/truncate every sequence to 100 words
labels = np.asarray(labels)
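
An optional check of what the tokenizer produced; the shape prints assume the full 25,000-review training set, and only the max_words most frequent tokens survive into the sequences:

print('Found %s unique tokens.' % len(word_index))
print('Shape of data tensor:', data.shape)     # (25000, 100)
print('Shape of label tensor:', labels.shape)  # (25000,)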

indices = np.arange(data.shape[0])  # shuffle the data, since samples are ordered neg then pos
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]

x_train = data[:training_samples]
y_train = labels[:training_samples]
x_val = data[training_samples: training_samples + validation_samples]
y_val = labels[training_samples: training_samples + validation_samples]
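
The bullet points above and the load_weights('pre_trained_glove_model.h5') call at the end imply a GloVe-based variant of this model, but the step that reads the GloVe vectors is missing here. A sketch of the standard recipe, assuming glove.6B.100d.txt (from the GloVe URL above) sits in data/glove.6B/:

glove_dir = "data/glove.6B"  # assumed download location

# Parse the GloVe file: each line is a word followed by its 100-d vector.
embeddings_index = {}
with open(os.path.join(glove_dir, "glove.6B.100d.txt"), encoding="utf8") as f:
    for line in f:
        values = line.split()
        embeddings_index[values[0]] = np.asarray(values[1:], dtype="float32")

# Build a (max_words, embedding_dim) matrix: row i holds the GloVe vector
# of the word with index i, or all zeros if the word is not in GloVe.
embedding_matrix = np.zeros((max_words, embedding_dim))
for word, i in word_index.items():
    if i < max_words:
        vector = embeddings_index.get(word)
        if vector is not None:
            embedding_matrix[i] = vector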

# Build the model
from keras.models import Sequential
from keras.layers import Embedding, Flatten, Dense

model = Sequential()
model.add(Embedding(max_words, embedding_dim, input_length=maxlen))
model.add(Flatten())
model.add(Dense(32, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
model.summary()
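
To make this the pretrained-embedding model, load embedding_matrix (from the sketch above) into the Embedding layer and freeze it before compiling, so training cannot overwrite the GloVe vectors:

model.layers[0].set_weights([embedding_matrix])
model.layers[0].trainable = False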

model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
history = model.fit(x_train, y_train, epochs=10, batch_size=32, validation_data=(x_val, y_val))
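
Saving the weights here gives the evaluation step at the bottom a file to load; the filename matches the load_weights call below:

model.save_weights('pre_trained_glove_model.h5')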

# Plot the results
import matplotlib.pyplot as plt

acc = history.history['acc']
val_acc = history.history['val_acc']
loss = history.history['loss']
val_loss = history.history['val_loss']

epochs = range(1, len(acc) + 1)

plt.plot(epochs, acc, 'bo', label='Training acc')
plt.plot(epochs, val_acc, 'b', label='Validation acc')
plt.title('Training and validation accuracy')
plt.legend()

plt.figure()

plt.plot(epochs, loss, 'bo', label='Training loss')
plt.plot(epochs, val_loss, 'b', label='Validation loss')
plt.title('Training and validation loss')
plt.legend()

plt.show()

# Tokenize the test data
test_dir = os.path.join(imdb_dir, "test")

labels = []
texts = []

for label_type in ['neg', 'pos']:
    fnames = glob.glob(os.path.join(test_dir, label_type, "*.txt"))
    fnames.sort()
    for fname in fnames:
        with open(fname, encoding='utf8') as f:
            texts.append(f.read())
        labels.append(0 if label_type == 'neg' else 1)

sequences = tokenizer.texts_to_sequences(texts)
x_test = pad_sequences(sequences, maxlen=maxlen)
y_test = np.asarray(labels)

# Evaluate the model on the test set
model.load_weights('pre_trained_glove_model.h5')  # weights saved after training above
model.evaluate(x_test, y_test)
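
With metrics=['acc'] in compile, evaluate returns [test_loss, test_acc]; with only 200 training samples, expect modest accuracy even with the GloVe vectors.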




