
tensorflow API

1. conv

1.1 tf.nn.conv2d: you supply the kernel and strides explicitly; the kernel has shape [ksize, ksize, input_channels, output_channels] and the strides are [1, stride, stride, 1]

with tf.name_scope('conv1') as scope:
    kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], dtype=tf.float32,
                                             stddev=1e-1), name='weights')
    conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')
    biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                         trainable=True, name='biases')
    bias = tf.nn.bias_add(conv, biases)
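As a quick shape sanity check (a minimal sketch, TF 1.x assumed; the 224x224 input size is an assumption for illustration), stride 4 with SAME padding maps 224 to ceil(224/4) = 56:

import tensorflow as tf

images = tf.placeholder(tf.float32, [None, 224, 224, 3])  # assumed input size
kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], stddev=1e-1))
conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')
print(conv.get_shape())  # (?, 56, 56, 64)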

1.2 slim.conv2d: pass the output channel count and kernel size directly; stride defaults to 1, and the weight/bias variables are created internally

net = slim.conv2d(images, 64, 11, stride=4, padding='SAME', scope='conv1')

or

net = slim.conv2d(images, 64, [11, 11], stride=4, padding='SAME', scope='conv1')
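The slim call is equivalent to the tf.nn.conv2d block in 1.1, except that slim creates the kernel and bias variables internally under the scope name (and applies ReLU by default). A minimal sketch showing the variables it creates, assuming the same input as above:

import tensorflow as tf
slim = tf.contrib.slim

images = tf.placeholder(tf.float32, [None, 224, 224, 3])  # assumed input size
net = slim.conv2d(images, 64, [11, 11], stride=4, padding='SAME', scope='conv1')
for v in tf.trainable_variables():
    print(v.name, v.get_shape())  # conv1/weights (11, 11, 3, 64), conv1/biases (64,)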

2. pool

2.1 tf.nn.max_pool

pool1 = tf.nn.max_pool(lrn1,
                       ksize=[1, 3, 3, 1],
                       strides=[1, 2, 2, 1],
                       padding='VALID',
                       name='pool1')

2.2 slim.max_pool2d

pool2 = slim.max_pool2d(lrn2, 3, 2, padding='VALID', scope='pool2')
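Both pooling calls express the same op; slim.max_pool2d just takes the scalar kernel size and stride directly. A minimal sketch comparing output shapes (the input shape is an assumption for illustration):

import tensorflow as tf
slim = tf.contrib.slim

lrn = tf.placeholder(tf.float32, [None, 56, 56, 64])  # assumed input shape
p1 = tf.nn.max_pool(lrn, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID')
p2 = slim.max_pool2d(lrn, 3, 2, padding='VALID')
print(p1.get_shape(), p2.get_shape())  # both (?, 27, 27, 64)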

3. fully_connected

3.1 tf.nn.xw_plus_b

in_shape = int(pool5.get_shape()[1] * pool5.get_shape()[2] * pool5.get_shape()[3])
flattened = tf.reshape(pool5, [-1, in_shape])
with tf.variable_scope('fc6') as scope:
    weights = tf.get_variable('weights', shape=[in_shape, 4096], trainable=True)
    biases = tf.get_variable('biases', shape=[4096], trainable=True)
    parameters += [weights, biases]  # assumes a parameters list defined earlier
    act = tf.nn.xw_plus_b(flattened, weights, biases)  # flattened @ weights + biases
    relu = tf.nn.relu(act)

3.2 slim.fully_connected

net = slim.fully_connected(net, 4096, activation_fn=None, scope='Bottleneck', reuse=False)
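slim also removes the manual flatten from 3.1: slim.flatten plus slim.fully_connected covers the same ground as the reshape/xw_plus_b pair. A minimal sketch (the pool5 shape is an assumption for illustration):

import tensorflow as tf
slim = tf.contrib.slim

pool5 = tf.placeholder(tf.float32, [None, 6, 6, 256])  # assumed shape
net = slim.flatten(pool5)                           # [-1, 6*6*256], replaces tf.reshape
net = slim.fully_connected(net, 4096, scope='fc6')  # creates weights/biases, ReLU by default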

4. relu

4.1 tf.nn.relu

      relu = tf.nn.relu(net) 

4.2 leaky relu (tf.where)

def _relu(x, leakiness=0.0):
    """Relu, with optional leaky support."""
    return tf.where(tf.less(x, 0.0), leakiness * x, x, name='leaky_relu')
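A quick usage sketch of the helper above (the leakiness value is arbitrary, for illustration):

import tensorflow as tf

x = tf.constant([-1.0, 0.5])
y = _relu(x, leakiness=0.1)  # helper defined above
with tf.Session() as sess:
    print(sess.run(y))       # [-0.1  0.5]: negatives are scaled by 0.1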

5. tf.concat

def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
    """Builds the 35x35 resnet block."""
    with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
        with tf.variable_scope('Branch_0'):
            tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
        with tf.variable_scope('Branch_1'):
            tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
        with tf.variable_scope('Branch_2'):
            tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
            tower_conv2_1 = slim.conv2d(tower_conv2_0, 32, 3, scope='Conv2d_0b_3x3')
            tower_conv2_2 = slim.conv2d(tower_conv2_1, 32, 3, scope='Conv2d_0c_3x3')
        mixed = tf.concat([tower_conv, tower_conv1_1, tower_conv2_2], 3)
        up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
                         activation_fn=None, scope='Conv2d_1x1')
        net += scale * up
        if activation_fn:
            net = activation_fn(net)
    return net
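A minimal usage sketch for the block above (the 35x35x256 input shape and the scale value match the Inception-ResNet stage this block comes from, but are assumptions here):

import tensorflow as tf
slim = tf.contrib.slim

inputs = tf.placeholder(tf.float32, [None, 35, 35, 256])
out = block35(inputs, scale=0.17)  # residual branch is scaled before the add
print(out.get_shape())             # (?, 35, 35, 256): the block preserves the shape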

6. init

6.1 The output of tf.truncated_normal is, as the name suggests, truncated: the cutoff is 2 times stddev, so samples falling more than two standard deviations from the mean are dropped and re-drawn.

kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], dtype=tf.float32,
                                         stddev=1e-1), name='weights')
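A minimal sketch checking the two-stddev cutoff empirically (TF 1.x session API assumed):

import tensorflow as tf

samples = tf.truncated_normal([100000], stddev=1e-1)
with tf.Session() as sess:
    vals = sess.run(samples)
print(abs(vals).max() <= 2 * 1e-1)  # True: anything past 2*stddev is re-drawn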

6.2.1 random_normal_initializer

with tf.variable_scope(name):
    n = filter_size * filter_size * out_filters  # fan-out of the conv filter
    kernel = tf.get_variable(
        'DW', [filter_size, filter_size, in_filters, out_filters],
        tf.float32, initializer=tf.random_normal_initializer(
            stddev=np.sqrt(2.0 / n)))  # He initialization: stddev = sqrt(2/n)
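The stddev here is He initialization, stddev = sqrt(2/n) with n the filter fan-out. For a concrete feel (filter and channel counts are assumptions for illustration):

import numpy as np

filter_size, out_filters = 3, 64             # assumed values
n = filter_size * filter_size * out_filters  # fan-out, as in the snippet above
print(np.sqrt(2.0 / n))                      # ~0.059: stddev shrinks as n grows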

6.2.2 tf.random_normal

images = tf.Variable(tf.random_normal([50,
                                       image_size,
                                       image_size, 3],
                                      dtype=tf.float32,
                                      stddev=1e-1), name='input')

6.3.1 xavier_initializer (via slim.arg_scope)

with slim.arg_scope([slim.conv2d, slim.fully_connected],
                    weights_initializer=slim.initializers.xavier_initializer(),
                    weights_regularizer=slim.l2_regularizer(weight_decay),
                    normalizer_fn=slim.batch_norm,
                    normalizer_params=batch_norm_params):
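weight_decay and batch_norm_params come from the surrounding model code; a minimal self-contained sketch of the same arg_scope in use (the parameter values are assumptions for illustration):

import tensorflow as tf
slim = tf.contrib.slim

weight_decay = 5e-4                                     # assumed value
batch_norm_params = {'decay': 0.995, 'epsilon': 0.001}  # assumed values

images = tf.placeholder(tf.float32, [None, 224, 224, 3])
with slim.arg_scope([slim.conv2d, slim.fully_connected],
                    weights_initializer=slim.initializers.xavier_initializer(),
                    weights_regularizer=slim.l2_regularizer(weight_decay),
                    normalizer_fn=slim.batch_norm,
                    normalizer_params=batch_norm_params):
    # every conv2d/fully_connected inside inherits these defaults
    net = slim.conv2d(images, 64, 3, scope='conv1')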


6.4.1 constant_initializer

b = tf.get_variable('biases', [out_dim],
                    initializer=tf.constant_initializer())  # value defaults to 0.0

6.4.2 tf.constant

biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32),
                     trainable=True, name='biases')

7. scope

tf.name_scope has no influence on variables created with tf.get_variable; only tf.variable_scope prefixes those. (Variables created directly with tf.Variable do pick up the name_scope prefix.)
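A minimal sketch demonstrating the difference:

import tensorflow as tf

with tf.name_scope('ns'):
    v1 = tf.Variable(0.0, name='v1')      # picks up the prefix: ns/v1:0
    v2 = tf.get_variable('v2', shape=[])  # ignores name_scope: v2:0
with tf.variable_scope('vs'):
    v3 = tf.get_variable('v3', shape=[])  # variable_scope applies: vs/v3:0
print(v1.name, v2.name, v3.name)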
