Keras: Using weights for NCE loss


Question


So here is the model with the standard loss function.

target = Input(shape=(1,), dtype='int32')
w_inputs = Input(shape=(1,), dtype='int32')
w_emb = Embedding(V, dim, embeddings_initializer='glorot_uniform', name='word_emb')(w_inputs)
w_flat = Flatten()(w_emb)

# context: kept as a named layer because I want to use it for prediction on the validation set
w1 = Dense(input_dim=dim, units=V, activation='softmax')
w = w1(w_flat)

model = Model(inputs=[w_inputs], outputs=[w])
model.compile(loss='sparse_categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])

It works fine. Given that NCE loss isn't available in Keras, I wrote up a custom loss.

def model_loss(layer, labels, inputs, num_sampled, num_classes, num_true):
    # note: get_weights() returns NumPy snapshots, not graph tensors
    weights = K.transpose(layer.get_weights()[0])
    biases = layer.get_weights()[1]

    def loss(y_true, y_pred):
        if K.learning_phase() == 1:
            # training: sampled NCE loss
            compute_loss = tf.nn.nce_loss(weights, biases, labels, inputs,
                                          num_sampled, num_classes, num_true,
                                          partition_strategy="div")
        else:
            # evaluation: full sigmoid cross-entropy over all classes
            logits = tf.matmul(K.squeeze(inputs, axis=0), K.transpose(weights))
            logits = tf.nn.bias_add(logits, biases)
            labels_one_hot = tf.one_hot(labels, num_classes)
            loss = tf.nn.sigmoid_cross_entropy_with_logits(
                labels=labels_one_hot[:][0][:],
                logits=logits)
            compute_loss = tf.reduce_sum(loss, axis=1)
        return compute_loss

    return loss

And changed the last line to:

model.compile(loss=model_loss(w1,target, w_emb, num_sampled, num_classes, num_true), optimizer='sgd',metrics=['accuracy']) 

This compiles, by the way. But on execution it dies:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-68-d3b3ef93b81b> in <module>
      3                                  epochs=epochs, steps_per_epoch = seq_len,
      4 
----> 5                                  verbose=1, max_queue_size=15)

/opt/conda/lib/python3.6/site-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
     89                 warnings.warn('Update your `' + object_name + '` call to the ' +
     90                               'Keras 2 API: ' + signature, stacklevel=2)
---> 91             return func(*args, **kwargs)
     92         wrapper._original_function = func
     93         return wrapper

/opt/conda/lib/python3.6/site-packages/keras/engine/training.py in fit_generator(self, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
   1416             use_multiprocessing=use_multiprocessing,
   1417             shuffle=shuffle,
-> 1418             initial_epoch=initial_epoch)
   1419 
   1420     @interfaces.legacy_generator_methods_support

/opt/conda/lib/python3.6/site-packages/keras/engine/training_generator.py in fit_generator(model, generator, steps_per_epoch, epochs, verbose, callbacks, validation_data, validation_steps, class_weight, max_queue_size, workers, use_multiprocessing, shuffle, initial_epoch)
     38 
     39     do_validation = bool(validation_data)
---> 40     model._make_train_function()
     41     if do_validation:
     42         model._make_test_function()

/opt/conda/lib/python3.6/site-packages/keras/engine/training.py in _make_train_function(self)
    507                     training_updates = self.optimizer.get_updates(
    508                         params=self._collected_trainable_weights,
--> 509                         loss=self.total_loss)
    510                 updates = (self.updates +
    511                            training_updates +

/opt/conda/lib/python3.6/site-packages/keras/legacy/interfaces.py in wrapper(*args, **kwargs)
     89                 warnings.warn('Update your `' + object_name + '` call to the ' +
     90                               'Keras 2 API: ' + signature, stacklevel=2)
---> 91             return func(*args, **kwargs)
     92         wrapper._original_function = func
     93         return wrapper

/opt/conda/lib/python3.6/site-packages/keras/optimizers.py in get_updates(self, loss, params)
    182     @interfaces.legacy_get_updates_support
    183     def get_updates(self, loss, params):
--> 184         grads = self.get_gradients(loss, params)
    185         self.updates = [K.update_add(self.iterations, 1)]
    186 

/opt/conda/lib/python3.6/site-packages/keras/optimizers.py in get_gradients(self, loss, params)
     89         grads = K.gradients(loss, params)
     90         if None in grads:
---> 91             raise ValueError('An operation has `None` for gradient. '
     92                              'Please make sure that all of your ops have a '
     93                              'gradient defined (i.e. are differentiable). '

ValueError: An operation has `None` for gradient. Please make sure that all of your ops have a gradient defined (i.e. are differentiable). Common ops without gradient: K.argmax, K.round, K.eval.

The issue, of course, is that the weights aren't actually getting updated in the layer, hence the `None` gradient. How could I do that without making a custom layer? I tried that approach, but I gave up on measuring things like val_acc when using a layer.
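The root of the disconnect seems to be get_weights(): it copies the current values out as NumPy arrays, so the loss ends up built from constants rather than from the layer's trainable variables. A minimal illustration of the difference, assuming the w1 Dense layer above has already been built:

# get_weights() returns NumPy snapshots; anything computed from them
# is a constant in the graph, so K.gradients() yields None with
# respect to the layer's trainable variables
w_numpy = w1.get_weights()[0]   # np.ndarray copy, no gradient path

# layer.weights holds the live tf.Variable objects; building the
# loss from these keeps the gradient path to the Dense layer intact
w_tensor = w1.weights[0]        # kernel variable, shape (dim, V)
b_tensor = w1.weights[1]        # bias variable, shape (V,)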


Answer 1:


It seems you cannot do this in Keras without the Layer API. You can try this solution using a custom layer: Keras NCE Implementation
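For reference, the linked approach boils down to a layer that creates the output weights itself and returns the per-example NCE loss as its output, so the gradient path stays intact. Below is a minimal sketch (untested, assuming Keras 2 with the TensorFlow 1.x backend; the NCELoss name and the identity-loss wiring are illustrative, not taken verbatim from the linked answer):

import tensorflow as tf
from keras import backend as K
from keras.layers import Layer

class NCELoss(Layer):
    # Owns the output weights and returns the per-example NCE loss,
    # so gradients flow into variables the layer itself created.
    def __init__(self, num_classes, num_sampled, num_true=1, **kwargs):
        self.num_classes = num_classes
        self.num_sampled = num_sampled
        self.num_true = num_true
        super(NCELoss, self).__init__(**kwargs)

    def build(self, input_shape):
        # inputs: [embeddings (batch, dim), labels (batch, num_true)]
        dim = input_shape[0][1]
        self.nce_weights = self.add_weight(name='nce_weights',
                                           shape=(self.num_classes, dim),
                                           initializer='glorot_uniform',
                                           trainable=True)
        self.nce_biases = self.add_weight(name='nce_biases',
                                          shape=(self.num_classes,),
                                          initializer='zeros',
                                          trainable=True)
        super(NCELoss, self).build(input_shape)

    def call(self, inputs):
        embeddings, labels = inputs
        loss = tf.nn.nce_loss(weights=self.nce_weights,
                              biases=self.nce_biases,
                              labels=tf.cast(labels, tf.int64),
                              inputs=embeddings,
                              num_sampled=self.num_sampled,
                              num_classes=self.num_classes,
                              num_true=self.num_true,
                              partition_strategy='div')
        return K.expand_dims(loss, axis=-1)  # shape (batch, 1)

    def compute_output_shape(self, input_shape):
        return (input_shape[0][0], 1)

# wiring it into the model from the question: the labels become a
# second model input, and the output is trained with an identity
# "loss" since the layer already computed the real loss
nce = NCELoss(num_classes=V, num_sampled=num_sampled)([w_flat, target])
model = Model(inputs=[w_inputs, target], outputs=nce)
model.compile(loss=lambda y_true, y_pred: y_pred, optimizer='sgd')

Note that with this pattern the model's output is the loss itself, so metrics like accuracy have to be computed separately (e.g. with a second inference model that does a full softmax over nce_weights), which is exactly the inconvenience the question mentions.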



Source: https://stackoverflow.com/questions/56859126/keras-using-weights-for-nce-loss
