tf.nn.sampled_softmax_loss

mac2024-05-21  27

tensorflow中具体的函数说明如下:

tf.nn.sampled_softmax_loss(
    weights,                        # Shape (num_classes, dim)     - floatXX
    biases,                         # Shape (num_classes,)         - floatXX
    labels,                         # Shape (batch_size, num_true) - int64
    inputs,                         # Shape (batch_size, dim)      - floatXX
    num_sampled,                    # int: number of negative classes to sample
    num_classes,                    # int: total number of classes
    num_true=1,
    sampled_values=None,
    remove_accidental_hits=True,
    partition_strategy="mod",
    name="sampled_softmax_loss")

使用样例

import tensorflow as tf

# Network parameters.
n_hidden_1 = 256  # number of units in the 1st hidden layer
n_input = 784     # MNIST data input (img shape: 28*28, flattened)
n_classes = 10    # MNIST total classes (0-9 digits)

# Dependent & independent variable placeholders.
x = tf.placeholder("float", [None, n_input])
y = tf.placeholder("float", [None, n_classes])

# Weights and biases.
weights = {
    'h1': tf.Variable(tf.random_normal([n_input, n_hidden_1])),
    'out': tf.Variable(tf.random_normal([n_hidden_1, n_classes]))
}
biases = {
    'b1': tf.Variable(tf.random_normal([n_hidden_1])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}


def tiny_perceptron(x, weights, biases):
    """Return the hidden-layer activations, NOT the final logits.

    sampled_softmax_loss performs the output projection itself (through its
    `weights`/`biases` arguments), so the final matmul with weights['out']
    is intentionally omitted here.
    """
    layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])
    out_layer = tf.nn.relu(layer_1)
    return out_layer


# Create the model.
pred = tiny_perceptron(x, weights, biases)

# Set up the loss-function inputs, shaped exactly as the API requires.
w = tf.transpose(weights['out'])               # (num_classes, dim)
b = biases['out']                              # (num_classes,)
labels = tf.reshape(tf.argmax(y, 1), [-1, 1])  # (batch_size, num_true)
inputs = pred                                  # (batch_size, dim)
num_sampled = 3
num_true = 1
num_classes = n_classes

print('Shapes\n------\nw:\t%s\nb:\t%s\nlabels:\t%s\ninputs:\t%s'
      % (w.shape, b.shape, labels.shape, inputs.shape))
# Shapes
# ------
# w:      (10, 256)   # Requires (num_classes, dim)     - CORRECT
# b:      (10,)       # Requires (num_classes)          - CORRECT
# labels: (?, 1)      # Requires (batch_size, num_true) - CORRECT
# inputs: (?, 256)    # Requires (batch_size, dim)      - CORRECT

loss_function = tf.reduce_mean(tf.nn.sampled_softmax_loss(
    weights=w,
    biases=b,
    labels=labels,
    inputs=inputs,
    num_sampled=num_sampled,
    num_true=num_true,
    num_classes=num_classes))

需要提到的是,这里的labels如果是one-hot类型编码,需要labels=tf.reshape(tf.argmax(labels_one_hot, 1), [-1,1])

参考地址:https://stackoverflow.com/questions/43810195/tensorflow-sampled-softmax-loss-correct-usage

最新回复(0)