import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import (Conv1D, Dense, Embedding, Flatten,
                                     Input, Lambda, MaxPooling1D)

# `tokenizer` is assumed to be a tf.keras.preprocessing.text.Tokenizer that
# has already been fitted on the corpus; index 0 is reserved for padding,
# hence the +1 on the vocabulary size below.


def cnn_with_one_filter(word_emb_size=100, output_size=7):
    # Input: a padded sequence of 15 token ids per example.
    input_ids = Input(shape=(15,), dtype=tf.int32, name="input_ids")
    input_word_emb = Embedding(len(tokenizer.word_index) + 1, word_emb_size,
                               name="class_emb")(input_ids)
    # Slide a width-2 (bigram) filter over the embedded sequence.
    conv1 = Conv1D(word_emb_size, kernel_size=2, activation=tf.nn.tanh)(input_word_emb)
    # Pool over all 15 - 2 + 1 = 14 conv positions, i.e. a global max over time.
    max_pooling = MaxPooling1D(15 - 2 + 1)(conv1)  # shape: (batch, 1, word_emb_size)
    flatten = Flatten()(max_pooling)  # shape: (batch, word_emb_size)
    hidden1 = Dense(100, activation=tf.nn.relu)(flatten)
    output = Dense(output_size, activation=tf.nn.softmax)(hidden1)
    model = tf.keras.Model(inputs=[input_ids], outputs=[output])
    return model
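
A minimal sketch of how this model might be compiled and trained, assuming `X_train` is a padded `(num_examples, 15)` array of token ids from the fitted `tokenizer` and `y_train` holds integer class labels in `[0, 7)`; both names are placeholders, not defined above:

```python
model = cnn_with_one_filter()
model.compile(optimizer="adam",
              loss="sparse_categorical_crossentropy",  # integer class labels
              metrics=["accuracy"])
model.summary()
# Placeholder data; substitute your own padded sequences and labels.
# model.fit(X_train, y_train, batch_size=32, epochs=5, validation_split=0.1)
```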


def cnn_with_multi_filter(word_emb_size=100, output_size=7):
    input_ids = Input(shape=(15,), dtype=tf.int32, name="input_ids")
    input_word_emb = Embedding(len(tokenizer.word_index) + 1, word_emb_size,
                               name="class_emb")(input_ids)
    convs = []
    filter_sizes = [2, 3, 4, 5]
    for fsz in filter_sizes:
        conv1 = Conv1D(word_emb_size, kernel_size=fsz, activation=tf.nn.tanh)(input_word_emb)
        # Pool over all 15 - fsz + 1 conv positions, i.e. a global max over time.
        max_pooling = MaxPooling1D(15 - fsz + 1)(conv1)  # shape: (batch, 1, word_emb_size)
        flatten = Flatten()(max_pooling)  # shape: (batch, word_emb_size)
        convs.append(flatten)
    # Concatenate the pooled features into one len(filter_sizes) * word_emb_size vector.
    merge = Lambda(lambda x: K.concatenate(x, axis=1))(convs)
    hidden1 = Dense(100, activation=tf.nn.relu)(merge)
    output = Dense(output_size, activation=tf.nn.softmax)(hidden1)
    model = tf.keras.Model(inputs=[input_ids], outputs=[output])
    return model
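

The multi-filter variant can be wired up the same way; the sketch below just pushes a couple of random id sequences through it as an end-to-end shape check (only `numpy` is assumed beyond the code above):

```python
import numpy as np

model = cnn_with_multi_filter()
# Two random padded sequences of 15 token ids drawn from the fitted vocabulary.
dummy_ids = np.random.randint(1, len(tokenizer.word_index) + 1, size=(2, 15))
print(model.predict(dummy_ids).shape)  # (2, 7): one softmax distribution per row
```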