def cnn_with_one_filter(word_emb_size=100, output_size=7, max_seq_len=15, kernel_size=2):
    """Build a TextCNN classifier with a single convolution filter size.

    Args:
        word_emb_size: dimensionality of the word embeddings (also used as the
            number of convolution filters, matching the original design).
        output_size: number of output classes (softmax width).
        max_seq_len: fixed input sequence length (was hard-coded to 15 in two
            places: the input shape and the pooling window).
        kernel_size: convolution window width (was hard-coded to 2).

    Returns:
        An uncompiled tf.keras.Model mapping int32 token-id sequences of shape
        (batch, max_seq_len) to class probabilities of shape (batch, output_size).
    """
    input_ids = Input(shape=(max_seq_len,), dtype=tf.int32, name="input_ids")
    # +1 reserves index 0 for padding; `tokenizer` is a module-level tokenizer
    # fitted elsewhere in this file — TODO confirm it is in scope at call time.
    vocab_size = len(tokenizer.word_index) + 1
    input_word_emb = Embedding(vocab_size, word_emb_size, name="class_emb")(input_ids)
    conv1 = Conv1D(word_emb_size, kernel_size=kernel_size, activation=tf.nn.tanh)(input_word_emb)
    # Pool over all (max_seq_len - kernel_size + 1) valid windows at once,
    # i.e. max-over-time pooling -> shape (batch, 1, word_emb_size).
    max_pooling = MaxPooling1D(max_seq_len - kernel_size + 1)(conv1)
    flatten = Flatten()(max_pooling)  # -> (batch, word_emb_size)
    hidden1 = Dense(100, activation=tf.nn.relu)(flatten)
    output = Dense(output_size, activation=tf.nn.softmax)(hidden1)
    return tf.keras.Model(inputs=[input_ids], outputs=[output])
def cnn_with_multi_filter(word_emb_size=100, output_size=7, max_seq_len=15, filter_sizes=(2, 3, 4, 5)):
    """Build a TextCNN classifier with multiple parallel filter sizes.

    One Conv1D + max-over-time-pooling branch is built per entry of
    ``filter_sizes``; the pooled features are concatenated and fed to a
    dense classification head.

    Args:
        word_emb_size: dimensionality of the word embeddings (also the number
            of filters per convolution branch).
        output_size: number of output classes (softmax width).
        max_seq_len: fixed input sequence length (was hard-coded to 15 in the
            input shape and every pooling window).
        filter_sizes: iterable of convolution window widths.

    Returns:
        An uncompiled tf.keras.Model mapping int32 token-id sequences of shape
        (batch, max_seq_len) to class probabilities of shape (batch, output_size).
    """
    input_ids = Input(shape=(max_seq_len,), dtype=tf.int32, name="input_ids")
    # +1 reserves index 0 for padding; `tokenizer` is a module-level tokenizer
    # fitted elsewhere in this file — TODO confirm it is in scope at call time.
    vocab_size = len(tokenizer.word_index) + 1
    input_word_emb = Embedding(vocab_size, word_emb_size, name="class_emb")(input_ids)
    convs = []
    for fsz in filter_sizes:
        conv1 = Conv1D(word_emb_size, kernel_size=fsz, activation=tf.nn.tanh)(input_word_emb)
        # Max-over-time pooling across all valid windows -> (batch, 1, word_emb_size).
        max_pooling = MaxPooling1D(max_seq_len - fsz + 1)(conv1)
        convs.append(Flatten()(max_pooling))  # -> (batch, word_emb_size)
    # Built-in Concatenate replaces the original Lambda(K.concatenate) wrapper:
    # identical output, but serializable with model.save(). Concatenate requires
    # at least two inputs, so pass a lone branch through unchanged.
    if len(convs) == 1:
        merge = convs[0]
    else:
        merge = tf.keras.layers.Concatenate(axis=1)(convs)
    hidden1 = Dense(100, activation=tf.nn.relu)(merge)
    output = Dense(output_size, activation=tf.nn.softmax)(hidden1)
    return tf.keras.Model(inputs=[input_ids], outputs=[output])
003 算法实战 | CNN
算法实战相关文章
最近热门
最常浏览
- 016 推荐系统 | 排序学习(LTR - Learning To Rank)
- 偏微分符号
- i.i.d(又称IID)
- 利普希茨连续条件(Lipschitz continuity)
- (error) MOVED 原因和解决方案
- TextCNN详解
- 找不到com.google.protobuf.GeneratedMessageV3的类文件
- Deployment failed: repository element was not specified in the POM inside distributionManagement
- cannot access com.google.protobuf.GeneratedMessageV3 解决方案
- CLUSTERDOWN Hash slot not served 问题原因和解决办法
×