test.py
import argparse
import json

import numpy as np
import nltk
from nltk import word_tokenize
from keras.preprocessing.text import tokenizer_from_json
from keras.preprocessing.sequence import pad_sequences

from model import create_model
from config import *  # provides max_length and embedding_dim

# Make sure the NLTK Punkt tokenizer models are available.
nltk.download('punkt')

# Tokenizers fitted during training (word vocabulary and POS tag vocabulary).
tokenizer_path = 'tokenizer.json'
tag_tokenizer_path = 'tag_tokenizer.json'
def parse_argument():
    """Parse the input sentence from the command line."""
    parser = argparse.ArgumentParser(description='Bidirectional LSTM POS tagger')
    parser.add_argument('--sent', required=True, help='Enter your sentence')
    return parser.parse_args()
def load_tokenizer(path):
    """Load a Keras Tokenizer that was saved as JSON."""
    with open(path) as f:
        data = json.load(f)
    return tokenizer_from_json(data)
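# Note (assumption): load_tokenizer() expects the tokenizer files to have been
# written with the common Keras recipe below, i.e. the string returned by
# Tokenizer.to_json() is itself dumped as JSON, so json.load() gives back the
# string that tokenizer_from_json() expects. The saving code is not part of
# this script; this is only a sketch of the assumed format.
#
#     import io, json
#     tokenizer_json = tokenizer.to_json()
#     with io.open('tokenizer.json', 'w', encoding='utf-8') as f:
#         f.write(json.dumps(tokenizer_json, ensure_ascii=False))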
def get_tags(sequences, index_to_tag):
    """Convert model output (one probability vector per timestep) into tag strings."""
    sequence_tags = []
    for sequence in sequences:
        sequence_tag = []
        for categorical in sequence:
            # Pick the most likely tag index at this timestep and look up its name.
            sequence_tag.append(index_to_tag.get(np.argmax(categorical)))
        sequence_tags.append(sequence_tag)
    return sequence_tags
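# A minimal sketch of what get_tags() returns, using a toy 1x3x4 "prediction"
# (one sentence, three timesteps, four tag classes) and a made-up
# index-to-tag mapping. The real mapping is built from tag_tokenizer.json
# inside predict(); the tag names here are only illustrative.
#
#     toy_pred = np.array([[[0.1, 0.7, 0.1, 0.1],
#                           [0.0, 0.1, 0.8, 0.1],
#                           [0.2, 0.1, 0.1, 0.6]]])
#     get_tags(toy_pred, {1: 'dt', 2: 'nn', 3: 'vbz'})  # -> [['dt', 'nn', 'vbz']]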
def predict():
    args = parse_argument()
    sentence = args.sent

    # Rebuild the vocabularies used at training time.
    tokenizer = load_tokenizer(tokenizer_path)
    word_index = tokenizer.word_index
    vocab_size = len(word_index) + 1

    tag_tokenizer = load_tokenizer(tag_tokenizer_path)
    tag_index = tag_tokenizer.word_index

    # Tokenize and encode the input sentence, then pad it to the model's input length.
    tokens = word_tokenize(sentence)
    print(f'tokens: {tokens}')
    encoded_sent = tokenizer.texts_to_sequences([tokens])[0]
    print(f'encoded sentence: {encoded_sent}')
    encoded_sent = pad_sequences([encoded_sent], maxlen=max_length, padding='post')
    print(f'encoded sentence after being padded: {encoded_sent}')

    # Rebuild the model architecture and load the trained weights.
    model = create_model(vocab_size, max_length, embedding_dim, word_index, tag_index)
    model.load_weights('models/POS_BiLSTM_CRF_WSJ_new.h5')

    # Predict, map the per-timestep predictions back to tag names,
    # and drop the padded positions.
    pred = model.predict(encoded_sent)
    sequence_tags = get_tags(pred, {i: t for t, i in tag_index.items()})
    tags = sequence_tags[0][:len(tokens)]
    print(tags)
    print(list(zip(tokens, tags)))


if __name__ == "__main__":
    predict()
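# Example usage (assumes tokenizer.json, tag_tokenizer.json and the weight file
# models/POS_BiLSTM_CRF_WSJ_new.h5 are present, e.g. produced by the project's
# training code):
#
#     python test.py --sent "The quick brown fox jumps over the lazy dog"
#
# The script prints the predicted tag sequence and the (token, tag) pairs.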