
5 Reading the CIFAR-100 dataset - 5.1/5.2/5.3 Reading the CIFAR-100 dataset with TensorFlow (Parts 1/2/3)


Two files: cifar_input.py and convnets_test.py

To switch between cifar10 and cifar100 you only need to change two values in convnets_test.py: cifar_data_url and cifar10or20or100, as shown in the snippet below.
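
A minimal sketch of that switch (both assignments appear verbatim further down in convnets_test.py; the CIFAR-10 variant is shown commented out for comparison):

# CIFAR-100 with fine labels (100 classes), the configuration used in this post
cifar_data_url = cifar100_data_url
cifar10or20or100 = 100

# CIFAR-10 instead: point at the CIFAR-10 archive and use 10 classes
# cifar_data_url = cifar10_data_url
# cifar10or20or100 = 10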

cifar_input.py:

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Routine for decoding the CIFAR-10 binary file format."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import os
from six.moves import xrange  # pylint: disable=redefined-builtin
import tensorflow as tf
import sys
from six.moves import urllib
import tarfile

# Process images of this size. Note that this differs from the original CIFAR
# image size of 32 x 32. If one alters this number, then the entire model
# architecture will change and any model would need to be retrained.
# IMAGE_SIZE = 24

# Global constants describing the CIFAR data sets.
# NUM_CLASSES = 10
IMAGE_SIZE = 32
IMAGE_DEPTH = 3
NUM_CLASSES_CIFAR10 = 10
NUM_CLASSES_CIFAR20 = 20
NUM_CLASSES_CIFAR100 = 100
NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN = 50000
NUM_EXAMPLES_PER_EPOCH_FOR_EVAL = 10000

print('cifar_input module called...')

# URLs from which the datasets are downloaded into the directory given by data_dir.
CIFAR10_DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
CIFAR100_DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-100-binary.tar.gz'


# Download the dataset from the URL and store it in the directory given by data_dir.
def maybe_download_and_extract(data_dir, data_url=CIFAR10_DATA_URL):
    """Download and extract the dataset from Alex's website."""
    dest_directory = data_dir  # e.g. '../CIFAR10_dataset'
    DATA_URL = data_url
    if not os.path.exists(dest_directory):
        os.makedirs(dest_directory)
    filename = DATA_URL.split('/')[-1]  # e.g. 'cifar-10-binary.tar.gz'
    filepath = os.path.join(dest_directory, filename)
    if not os.path.exists(filepath):
        def _progress(count, block_size, total_size):
            sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
                float(count * block_size) / float(total_size) * 100.0))
            sys.stdout.flush()
        filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
        print()
        statinfo = os.stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    if data_url == CIFAR10_DATA_URL:
        extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')
    else:
        extracted_dir_path = os.path.join(dest_directory, 'cifar-100-binary')
    if not os.path.exists(extracted_dir_path):
        tarfile.open(filepath, 'r:gz').extractall(dest_directory)


def read_cifar10(filename_queue, coarse_or_fine=None):
    """Reads and parses examples from CIFAR10 data files.

    Recommendation: if you want N-way read parallelism, call this function
    N times. This will give you N independent Readers reading different
    files & positions within those files, which will give better mixing of
    examples.

    Args:
        filename_queue: A queue of strings with the filenames to read from.

    Returns:
        An object representing a single example, with the following fields:
            height: number of rows in the result (32)
            width: number of columns in the result (32)
            depth: number of color channels in the result (3)
            key: a scalar string Tensor describing the filename & record number
                for this example.
            label: an int32 Tensor with the label in the range 0..9.
            uint8image: a [height, width, depth] uint8 Tensor with the image data
    """

    class CIFAR10Record(object):
        pass
    result = CIFAR10Record()

    # Dimensions of the images in the CIFAR-10 dataset.
    # See http://www.cs.toronto.edu/~kriz/cifar.html for a description of the
    # input format.
    # Each record in the CIFAR-10 binaries is 3072 = 32x32x3 image bytes plus a label:
    # <1 x label><3072 x pixel>
    # ...
    # <1 x label><3072 x pixel>

    # Number of bytes used for the class label.
    label_bytes = 1  # 2 for CIFAR-100
    result.height = 32
    result.width = 32
    result.depth = 3
    # Number of bytes used for the image.
    image_bytes = result.height * result.width * result.depth
    # Every record consists of a label followed by the image, with a
    # fixed number of bytes for each.
    record_bytes = label_bytes + image_bytes

    # Read a record, getting filenames from the filename_queue. No
    # header or footer in the CIFAR-10 format, so we leave header_bytes
    # and footer_bytes at their default of 0.
    # Create a fixed-length record reader that reads one full record
    # (label_bytes + image_bytes) per call.
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes,
                                        header_bytes=0, footer_bytes=0)
    # Calling the reader's read method returns one record.
    result.key, value = reader.read(filename_queue)

    # Convert from a string to a vector of uint8 that is record_bytes long.
    record_bytes = tf.decode_raw(value, tf.uint8)

    # The first byte represents the label, which we convert from uint8 to int32.
    result.label = tf.cast(
        tf.strided_slice(record_bytes, [0], [label_bytes]), tf.int32)

    # The remaining bytes after the label represent the image, which we reshape
    # from [depth * height * width] to [depth, height, width].
    depth_major = tf.reshape(
        tf.strided_slice(record_bytes, [label_bytes],
                         [label_bytes + image_bytes]),
        [result.depth, result.height, result.width])
    # Convert from [depth, height, width] to [height, width, depth].
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])

    return result


def read_cifar100(filename_queue, coarse_or_fine='fine'):
    """Reads and parses examples from CIFAR100 data files.

    Recommendation: if you want N-way read parallelism, call this function
    N times. This will give you N independent Readers reading different
    files & positions within those files, which will give better mixing of
    examples.

    Args:
        filename_queue: A queue of strings with the filenames to read from.

    Returns:
        An object representing a single example, with the following fields:
            height: number of rows in the result (32)
            width: number of columns in the result (32)
            depth: number of color channels in the result (3)
            key: a scalar string Tensor describing the filename & record number
                for this example.
            label: an int32 Tensor with the label.
            uint8image: a [height, width, depth] uint8 Tensor with the image data
    """

    class CIFAR100Record(object):
        pass
    result = CIFAR100Record()

    result.height = 32
    result.width = 32
    result.depth = 3

    # Every CIFAR-100 record carries two class labels: the first byte is the
    # coarse label, the second byte is the fine label:
    # <1 x coarse label><1 x fine label><3072 x pixel>
    coarse_label_bytes = 1
    fine_label_bytes = 1
    # Number of bytes used for the image.
    image_bytes = result.height * result.width * result.depth
    # Every record consists of the labels followed by the image, with a
    # fixed number of bytes for each.
    record_bytes = coarse_label_bytes + fine_label_bytes + image_bytes

    # Create a fixed-length record reader that reads one full record per call.
    # CIFAR-100 records have no header_bytes or footer_bytes, so both are 0.
    reader = tf.FixedLengthRecordReader(record_bytes=record_bytes,
                                        header_bytes=0, footer_bytes=0)
    # Calling the reader's read method returns one record.
    result.key, value = reader.read(filename_queue)

    # Convert the string record into a vector of uint8 of length record_bytes.
    record_bytes = tf.decode_raw(value, tf.uint8)

    # The first byte is the coarse label; convert it from uint8 to int32.
    coarse_label = tf.cast(
        tf.strided_slice(record_bytes, [0], [coarse_label_bytes]), tf.int32)
    # The second byte is the fine label; convert it from uint8 to int32.
    fine_label = tf.cast(
        tf.strided_slice(record_bytes, [coarse_label_bytes],
                         [coarse_label_bytes + fine_label_bytes]), tf.int32)

    if coarse_or_fine == 'fine':
        result.label = fine_label    # 100 fine class labels
    else:
        result.label = coarse_label  # 20 coarse class labels

    # The remaining bytes are image data; reshape the flat
    # [depth * height * width] vector into a [depth, height, width] tensor.
    depth_major = tf.reshape(
        tf.strided_slice(record_bytes, [coarse_label_bytes + fine_label_bytes],
                         [coarse_label_bytes + fine_label_bytes + image_bytes]),
        [result.depth, result.height, result.width])
    # Convert from [depth, height, width] to [height, width, depth].
    result.uint8image = tf.transpose(depth_major, [1, 2, 0])

    return result


def _generate_image_and_label_batch(image, label, min_queue_examples,
                                    batch_size, shuffle):
    """Construct a queued batch of images and labels.

    Args:
        image: 3-D Tensor of [height, width, 3] of type.float32.
        label: 1-D Tensor of type.int32
        min_queue_examples: int32, minimum number of samples to retain
            in the queue that provides of batches of examples.
        batch_size: Number of images per batch.
        shuffle: boolean indicating whether to use a shuffling queue.

    Returns:
        images: Images. 4D tensor of [batch_size, height, width, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    """
    # Create a queue that shuffles the examples, and then
    # read 'batch_size' images + labels from the example queue.
    num_preprocess_threads = 16
    if shuffle:
        images, label_batch = tf.train.shuffle_batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size,
            min_after_dequeue=min_queue_examples)
    else:
        images, label_batch = tf.train.batch(
            [image, label],
            batch_size=batch_size,
            num_threads=num_preprocess_threads,
            capacity=min_queue_examples + 3 * batch_size)

    # Display the training images in the visualizer.
    tf.summary.image('images', images)

    return images, tf.reshape(label_batch, [batch_size])


def distorted_inputs(cifar10or20or100, data_dir, batch_size):
    """Construct distorted input for CIFAR training using the Reader ops.

    Args:
        cifar10or20or100: selects cifar10, fine-grained cifar100 (100) or
            coarse-grained cifar100 (20).
        data_dir: Path to the CIFAR-10 or CIFAR-100 data directory.
        batch_size: Number of images per batch.

    Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    """
    # Decide whether to read cifar10 or cifar100 (cifar100 has 20 or 100 classes).
    if cifar10or20or100 == 10:
        filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
                     for i in xrange(1, 6)]
        read_cifar = read_cifar10
        coarse_or_fine = None
    if cifar10or20or100 == 20:
        filenames = [os.path.join(data_dir, 'train.bin')]
        read_cifar = read_cifar100
        coarse_or_fine = 'coarse'
    if cifar10or20or100 == 100:
        filenames = [os.path.join(data_dir, 'train.bin')]
        read_cifar = read_cifar100
        coarse_or_fine = 'fine'

    # Check that the files exist.
    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)

    # Create a filename queue from the list of filenames.
    filename_queue = tf.train.string_input_producer(filenames)

    # Read examples from the files in the filename queue.
    read_input = read_cifar(filename_queue, coarse_or_fine=coarse_or_fine)
    # Convert the unsigned 8-bit image data to float32.
    casted_image = tf.cast(read_input.uint8image, tf.float32)

    # Size of the output image; here it matches the original image size.
    height = IMAGE_SIZE
    width = IMAGE_SIZE

    # Pad the image by 4 pixels so it becomes [32+4, 32+4]; this leaves room
    # for the random crop below.
    padded_image = tf.image.resize_image_with_crop_or_pad(casted_image,
                                                          width + 4, height + 4)

    # The following operations apply several distortions to the original image,
    # augmenting the original training set.

    # Randomly crop a [height, width] (i.e. [32, 32]) region out of the
    # [36, 36] padded image.
    distorted_image = tf.random_crop(padded_image, [height, width, 3])

    # Randomly flip the image horizontally (swap left and right pixels).
    distorted_image = tf.image.random_flip_left_right(distorted_image)

    # These two operations do not commute: brightness-then-contrast and
    # contrast-then-brightness give different results, so their order could
    # also be randomized.
    distorted_image = tf.image.random_brightness(distorted_image, max_delta=63)
    distorted_image = tf.image.random_contrast(distorted_image,
                                               lower=0.2, upper=1.8)

    # Standardize the data: subtract the mean and divide by the variance of the pixels.
    float_image = tf.image.per_image_standardization(distorted_image)

    # Set the shapes of the tensors.
    float_image.set_shape([height, width, 3])
    read_input.label.set_shape([1])

    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *
                             min_fraction_of_examples_in_queue)
    print('Filling queue with %d CIFAR images before starting to train. '
          'This will take a few minutes.' % min_queue_examples)

    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(float_image, read_input.label,
                                           min_queue_examples, batch_size,
                                           shuffle=True)


def inputs(cifar10or20or100, eval_data, data_dir, batch_size):
    """Construct input for CIFAR evaluation using the Reader ops.

    Args:
        cifar10or20or100: selects cifar10, fine-grained cifar100 (100) or
            coarse-grained cifar100 (20).
        eval_data: True or False, indicating whether to read the test set
            or the training set.
        data_dir: Path to the CIFAR-10 or CIFAR-100 data directory.
        batch_size: Number of images per batch.

    Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    """
    # Decide whether to read cifar10 or cifar100 (cifar100 has 20 or 100 classes).
    if cifar10or20or100 == 10:
        read_cifar = read_cifar10
        coarse_or_fine = None
        if not eval_data:
            filenames = [os.path.join(data_dir, 'data_batch_%d.bin' % i)
                         for i in xrange(1, 6)]
            num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
        else:
            filenames = [os.path.join(data_dir, 'test_batch.bin')]
            num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL

    if cifar10or20or100 == 20 or cifar10or20or100 == 100:
        read_cifar = read_cifar100
        if not eval_data:
            filenames = [os.path.join(data_dir, 'train.bin')]
            num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN
        else:
            filenames = [os.path.join(data_dir, 'test.bin')]
            num_examples_per_epoch = NUM_EXAMPLES_PER_EPOCH_FOR_EVAL
    if cifar10or20or100 == 100:
        coarse_or_fine = 'fine'
    if cifar10or20or100 == 20:
        coarse_or_fine = 'coarse'

    for f in filenames:
        if not tf.gfile.Exists(f):
            raise ValueError('Failed to find file: ' + f)

    # Create a filename queue from the list of filenames.
    filename_queue = tf.train.string_input_producer(filenames)

    # Read examples from the files in the filename queue.
    read_input = read_cifar(filename_queue, coarse_or_fine=coarse_or_fine)
    # Convert the unsigned 8-bit image data to float32.
    casted_image = tf.cast(read_input.uint8image, tf.float32)

    # Size of the output image; here it matches the original image size.
    height = IMAGE_SIZE
    width = IMAGE_SIZE

    # Image preprocessing for evaluation.
    # Crop the central [height, width] of the image (no actual cropping happens
    # here because the sizes already match).
    resized_image = tf.image.resize_image_with_crop_or_pad(casted_image,
                                                           width, height)

    # Standardize the data: subtract the mean and divide by the variance.
    float_image = tf.image.per_image_standardization(resized_image)

    # Set the shapes of the tensors.
    float_image.set_shape([height, width, 3])
    read_input.label.set_shape([1])

    # Ensure that the random shuffling has good mixing properties.
    min_fraction_of_examples_in_queue = 0.4
    min_queue_examples = int(num_examples_per_epoch *
                             min_fraction_of_examples_in_queue)

    # Generate a batch of images and labels by building up a queue of examples.
    return _generate_image_and_label_batch(float_image, read_input.label,
                                           min_queue_examples, batch_size,
                                           shuffle=False)

convnets_test.py:

#-*- coding:utf-8 -*-
# A simple convolutional neural network for classifying the CIFAR dataset: conv2d + activation + pool + fc
import csv
import tensorflow as tf
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import sys
from six.moves import urllib
import tarfile
import cifar_input
import numpy as np

# Algorithm hyperparameters
learning_rate_init = 0.001
training_epochs = 2
batch_size = 100
display_step = 10
conv1_kernel_num = 64
conv2_kernel_num = 64
fc1_units_num = 512
fc2_units_num = 512

activation_func = tf.nn.relu
activation_name = 'relu'
l2loss_ratio = 0.05


# Input image parameters for the dataset
# dataset_dir_cifar10 = '../CIFAR10_dataset/cifar-10-batches-bin'
# dataset_dir_cifar100 = '../CIFAR100_dataset/cifar-100-binary'

dataset_dir_cifar10 = '../CIFAR10_dataset'
dataset_dir_cifar100 = '../CIFAR100_dataset'

num_examples_per_epoch_for_train = cifar_input.NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN  # 50000
num_examples_per_epoch_for_eval = cifar_input.NUM_EXAMPLES_PER_EPOCH_FOR_EVAL  # 10000
image_size = cifar_input.IMAGE_SIZE
image_channel = cifar_input.IMAGE_DEPTH

cifar10_data_url = cifar_input.CIFAR10_DATA_URL
cifar100_data_url = cifar_input.CIFAR100_DATA_URL

cifar_data_url = cifar100_data_url

# By changing cifar10or20or100 you can test cifar10, cifar20 or cifar100
# (or run the model on fake data by setting cifar10or20or100 = -1)
cifar10or20or100 = 100
if cifar10or20or100 == 10:
    n_classes = cifar_input.NUM_CLASSES_CIFAR10
    dataset_dir = dataset_dir_cifar10

if cifar10or20or100 == 20:
    n_classes = cifar_input.NUM_CLASSES_CIFAR20
    dataset_dir = dataset_dir_cifar100

if cifar10or20or100 == 100:
    n_classes = cifar_input.NUM_CLASSES_CIFAR100
    dataset_dir = dataset_dir_cifar100

# Download the dataset from the URL into the directory specified by data_dir
# def maybe_download_and_extract(data_dir):
#     """下載並解壓縮資料集 from Alex's website."""
#     dest_directory = data_dir
#     DATA_URL = 'http://www.cs.toronto.edu/~kriz/cifar-10-binary.tar.gz'
#     if not os.path.exists(dest_directory):
#         os.makedirs(dest_directory)
#     filename = DATA_URL.split('/')[-1] #'cifar-10-binary.tar.gz'
#     filepath = os.path.join(dest_directory, filename)#'../CIFAR10_dataset\\cifar-10-binary.tar.gz'
#     if not os.path.exists(filepath):
#         def _progress(count, block_size, total_size):
#             sys.stdout.write('\r>> Downloading %s %.1f%%' % (filename,
#                 float(count * block_size) / float(total_size) * 100.0))
#             sys.stdout.flush()
#         filepath, _ = urllib.request.urlretrieve(DATA_URL, filepath, _progress)
#         print()
#         statinfo = os.stat(filepath)
#         print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
#
#     extracted_dir_path = os.path.join(dest_directory, 'cifar-10-batches-bin')#'../CIFAR10_dataset\\cifar-10-batches-bin'
#     if not os.path.exists(extracted_dir_path):
#         tarfile.open(filepath, 'r:gz').extractall(dest_directory)

def get_distorted_train_batch(cifar10or20or100,data_dir,batch_size):
    """Construct distorted input for CIFAR training using the Reader ops.

      Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.

      Raises:
        ValueError: If no data_dir
      """
    if not data_dir:
        raise ValueError('Please supply a data_dir')
    if cifar10or20or100==10:
        data_dir = os.path.join(data_dir, 'cifar-10-batches-bin')
    else :
        data_dir = os.path.join(data_dir, 'cifar-100-binary')
    images, labels = cifar_input.distorted_inputs(cifar10or20or100=cifar10or20or100,data_dir=data_dir,batch_size=batch_size)
    return images,labels

def get_undistorted_eval_batch(cifar10or20or100,data_dir,eval_data, batch_size):
    """Construct input for CIFAR evaluation using the Reader ops.
    Args:
        eval_data: bool, indicating if one should use the train or eval data set.
    Returns:
        images: Images. 4D tensor of [batch_size, IMAGE_SIZE, IMAGE_SIZE, 3] size.
        labels: Labels. 1D tensor of [batch_size] size.
    Raises:
        ValueError: If no data_dir
    """
    if not data_dir:
        raise ValueError('Please supply a data_dir')
    if cifar10or20or100==10:
        data_dir = os.path.join(data_dir, 'cifar-10-batches-bin')
    else :
        data_dir = os.path.join(data_dir, 'cifar-100-binary')
    images, labels = cifar_input.inputs(cifar10or20or100=cifar10or20or100,eval_data=eval_data,data_dir=data_dir,batch_size=batch_size)
    return images,labels

# Returns an initialized weight Variable with the given shape and name
def WeightsVariable(shape, name_str, stddev=0.1):
    # initial = tf.random_normal(shape=shape, stddev=stddev, dtype=tf.float32)
    initial = tf.truncated_normal(shape=shape, stddev=stddev, dtype=tf.float32)
    return tf.Variable(initial, dtype=tf.float32, name=name_str)

# Returns an initialized bias Variable with the given shape and name
def BiasesVariable(shape, name_str, init_value=0.00001):
    initial = tf.constant(init_value, shape=shape)
    return tf.Variable(initial, dtype=tf.float32, name=name_str)

# Wrapper for a 2D convolutional layer: activation(conv2d + bias)
def Conv2d(x, W, b, stride=1, padding='SAME',activation=tf.nn.relu,act_name='relu'):
    with tf.name_scope('conv2d_bias'):
        y = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
        y = tf.nn.bias_add(y, b)
    with tf.name_scope(act_name):
        y = activation(y)
    return y

# Wrapper for a 2D pooling layer
def Pool2d(x, pool= tf.nn.max_pool, k=2, stride=2,padding='SAME'):
    return pool(x, ksize=[1, k, k, 1], strides=[1, stride, stride, 1], padding=padding)

# Wrapper for a fully connected layer: activation(Wx + b)
def FullyConnected(x, W, b, activation=tf.nn.relu, act_name='relu'):
    with tf.name_scope('Wx_b'):
        y = tf.matmul(x, W)
        y = tf.add(y, b)
    with tf.name_scope(act_name):
        y = activation(y)
    return y

# Add summary nodes for each layer's activation output
def AddActivationSummary(x):
    tf.summary.histogram('/activations',x)
    tf.summary.scalar('/sparsity',tf.nn.zero_fraction(x))

# Add (moving-average) scalar summary ops for all loss nodes
def AddLossesSummary(losses):
    # Compute moving averages of all individual losses and the total loss
    loss_averages = tf.train.ExponentialMovingAverage(0.9,name='avg')
    loss_averages_op = loss_averages.apply(losses)

    # Attach scalar summary nodes for all individual losses and the total loss,
    # and for their smoothed (moving-average) versions as well
    for loss in losses:
        # The raw loss gets '(raw)' appended to its name; the smoothed loss gets '(avg)'
        tf.summary.scalar(loss.op.name + '(raw)',loss)
        tf.summary.scalar(loss.op.name + '(avg)',loss_averages.average(loss))
    return loss_averages_op

# The activation function was changed in 4 places: Conv2d_1, Conv2d_2, FC1_nonlinear, FC2_nonlinear
def Inference(image_holder):
    # First convolutional layer: activation(conv2d + bias)
    with tf.name_scope('Conv2d_1'):
        # conv1_kernel_num = 64
        weights = WeightsVariable(shape=[5, 5, image_channel, conv1_kernel_num],
                                  name_str='weights',stddev=5e-2)
        biases = BiasesVariable(shape=[conv1_kernel_num], name_str='biases',init_value=0.0)
        conv1_out = Conv2d(image_holder, weights, biases, stride=1, padding='SAME',activation=activation_func,act_name=activation_name)
        AddActivationSummary(conv1_out)

    # First pooling layer (pool 2d)
    with tf.name_scope('Pool2d_1'):
        pool1_out = Pool2d(conv1_out, pool=tf.nn.max_pool, k=3, stride=2,padding='SAME')

    # Second convolutional layer: activation(conv2d + bias)
    with tf.name_scope('Conv2d_2'):
        # conv2_kernels_num = 64
        weights = WeightsVariable(shape=[5, 5, conv1_kernel_num, conv2_kernel_num], name_str='weights', stddev=5e-2)
        biases = BiasesVariable(shape=[conv2_kernel_num], name_str='biases', init_value=0.0)
        conv2_out = Conv2d(pool1_out, weights, biases, stride=1, padding='SAME',activation=activation_func,act_name=activation_name)
        AddActivationSummary(conv2_out)

    # Second pooling layer (pool 2d)
    with tf.name_scope('Pool2d_2'):
        pool2_out = Pool2d(conv2_out, pool=tf.nn.max_pool, k=3, stride=2, padding='SAME')

    # Reshape the 2D feature maps into a 1D feature vector
    with tf.name_scope('FeatsReshape'):
        features = tf.reshape(pool2_out, [batch_size,-1])
        feats_dim = features.get_shape()[1].value

    # First fully connected layer
    with tf.name_scope('FC1_nonlinear'):
        weights = WeightsVariable(shape=[feats_dim, fc1_units_num],name_str='weights',stddev=4e-2)
        biases = BiasesVariable(shape=[fc1_units_num], name_str='biases',init_value=0.1)
        fc1_out = FullyConnected(features, weights, biases, activation=activation_func,act_name=activation_name)
        AddActivationSummary(fc1_out)
        with tf.name_scope('L2_loss'):
            weight_loss = tf.multiply(tf.nn.l2_loss(weights),l2loss_ratio,name="fc1_weight_loss")
            tf.add_to_collection('losses',weight_loss)

    # Second fully connected layer
    with tf.name_scope('FC2_nonlinear'):
        weights = WeightsVariable(shape=[fc1_units_num, fc2_units_num],name_str='weights',stddev=4e-2)
        biases = BiasesVariable(shape=[fc2_units_num], name_str='biases',init_value=0.1)
        fc2_out = FullyConnected(fc1_out, weights, biases, activation=activation_func,act_name=activation_name)
        AddActivationSummary(fc2_out)
        with tf.name_scope('L2_loss'):
            weight_loss = tf.multiply(tf.nn.l2_loss(weights), l2loss_ratio, name="fc2_weight_loss")
            tf.add_to_collection('losses', weight_loss)

    # Third fully connected layer (linear output)
    with tf.name_scope('FC3_linear'):
        fc3_units_num = n_classes
        weights = WeightsVariable(shape=[fc2_units_num, fc3_units_num],name_str='weights',stddev=1.0/fc2_units_num)
        biases = BiasesVariable(shape=[fc3_units_num], name_str='biases',init_value=0.0)
        logits = FullyConnected(fc2_out, weights, biases,activation=tf.identity, act_name='linear')
        AddActivationSummary(logits)
    return logits

def TrainModel():
    # Build the computation graph using the functions defined above
    with tf.Graph().as_default():

        # Graph inputs
        with tf.name_scope('Inputs'):
            image_holder = tf.placeholder(tf.float32, [batch_size, image_size,image_size,image_channel], name='images')
            labels_holder = tf.placeholder(tf.int32, [batch_size], name='labels')

        # Forward inference part of the graph
        with tf.name_scope('Inference'):
             logits = Inference(image_holder)

        # Define the loss layer
        with tf.name_scope('Loss'):
            cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels_holder,logits=logits)
            cross_entropy_loss = tf.reduce_mean(cross_entropy,name='xentropy_loss')
            tf.add_to_collection('losses',cross_entropy_loss)
            # Total loss = cross-entropy loss + L2 losses of all weights
            total_loss = tf.add_n(tf.get_collection('losses'),name='total_loss')
            average_losses = AddLossesSummary(tf.get_collection('losses') + [total_loss])

        # Define the optimization/training layer
        with tf.name_scope('Train'):
            learning_rate = tf.placeholder(tf.float32)
            global_step = tf.Variable(0, name='global_step', trainable=False, dtype=tf.int64)
            optimizer = tf.train.RMSPropOptimizer(learning_rate=learning_rate)
            # optimizer = tf.train.MomentumOptimizer(learning_rate=learning_rate,momentum=0.9)
            # optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
            # optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
            # optimizer = tf.train.AdagradOptimizer(learning_rate=learning_rate)
            # optimizer = tf.train.FtrlOptimizer(learning_rate=learning_rate)

            train_op = optimizer.minimize(total_loss,global_step=global_step)

        # Define the model evaluation layer
        with tf.name_scope('Evaluate'):
            top_K_op = tf.nn.in_top_k(predictions=logits,targets=labels_holder,k=1)

        # Define the node that fetches a batch of training samples
        with tf.name_scope('GetTrainBatch'):
            image_train,labels_train = get_distorted_train_batch(cifar10or20or100=cifar10or20or100,data_dir=dataset_dir,batch_size=batch_size)

        # Define the node that fetches a batch of test samples
        with tf.name_scope('GetTestBatch'):
            image_test, labels_test = get_undistorted_eval_batch(cifar10or20or100=cifar10or20or100,data_dir=dataset_dir,eval_data=True, batch_size=batch_size)

        merged_summaries = tf.summary.merge_all()

        # Add the initialization node for all variables
        init_op = tf.global_variables_initializer()

        print('Writing the graph to the event file; view it in TensorBoard')
        summary_writer = tf.summary.FileWriter(logdir='logs')
        summary_writer.add_graph(graph=tf.get_default_graph())
        summary_writer.flush()

        # Collect evaluation results to save to a file later
        results_list = list()

        # Record the parameter configuration
        results_list.append(['learning_rate', learning_rate_init,
                             'training_epochs', training_epochs,
                             'batch_size', batch_size,
                             'conv1_kernel_num', conv1_kernel_num,
                             'conv2_kernel_num', conv2_kernel_num,
                             'fc1_units_num', fc1_units_num,
                             'fc2_units_num', fc2_units_num])
        results_list.append(['train_step', 'train_loss','train_step', 'train_accuracy'])

        with tf.Session() as sess:
            sess.run(init_op)
            print('===>>>>>>>== Training the model on the training set ==<<<<<<<=====')
            total_batches = int(num_examples_per_epoch_for_train / batch_size)
            print('Per batch Size:', batch_size)
            print('Train sample Count Per Epoch:',num_examples_per_epoch_for_train)
            print('Total batch Count Per Epoch:', total_batches)

            # Start the data-reading queue runners
            tf.train.start_queue_runners()
            # Track the number of steps the model has been trained for
            training_step = 0
            # Train for the specified number of epochs; each epoch covers num_examples_per_epoch_for_train samples
            for epoch in range(training_epochs):
                # Run through every batch in each epoch
                for batch_idx in range(total_batches):
                    # Run the input graph to fetch one batch of training data
                    images_batch ,labels_batch = sess.run([image_train,labels_train])
                    # Run the optimizer's training node
                    _,loss_value,avg_losses = sess.run([train_op,total_loss,average_losses],
                                            feed_dict={image_holder:images_batch,
                                                       labels_holder:labels_batch,
                                                       learning_rate:learning_rate_init})
                    # training_step increases by 1 on every training-node call; its final value is training_epochs * total_batches
                    training_step = sess.run(global_step)
                    # Every display_step steps, compute the current loss and classification accuracy
                    if training_step % display_step == 0:
                        # Run the accuracy node to compute accuracy on the current training batch
                        predictions = sess.run([top_K_op],
                                               feed_dict={image_holder:images_batch,
                                                          labels_holder:labels_batch})
                        # Accuracy on the current batch (correct predictions / batch size)
                        batch_accuracy = np.sum(predictions)/batch_size
                        results_list.append([training_step,loss_value,training_step,batch_accuracy])
                        print("Training Step:" + str(training_step) +
                              ",Training Loss = " + "{:.6f}".format(loss_value) +
                              ",Training Accuracy = " + "{:.5f}".format(batch_accuracy) )
                        # Run the summary nodes
                        summaries_str = sess.run(merged_summaries,feed_dict=
                                                    {image_holder:images_batch,
                                                     labels_holder:labels_batch})
                        summary_writer.add_summary(summary=summaries_str,global_step=training_step)
                        summary_writer.flush()

            summary_writer.close()
            print('Training finished')

            print('===>>>>>>>== Evaluating the model on the test set ==<<<<<<<=====')
            total_batches = int(num_examples_per_epoch_for_eval / batch_size)
            total_examples = total_batches * batch_size
            print('Per batch Size:', batch_size)
            print('Test sample Count Per Epoch:', total_examples)
            print('Total batch Count Per Epoch:', total_batches)
            correct_predicted = 0
            for test_step in range(total_batches):
                # Run the input graph to fetch one batch of test data
                images_batch,labels_batch = sess.run([image_test,labels_test])
                # Run the accuracy node to compute accuracy on the current test batch
                predictions = sess.run([top_K_op],
                                       feed_dict={image_holder:images_batch,
                                                  labels_holder:labels_batch})
                # Accumulate the number of correctly predicted samples over all batches
                correct_predicted += np.sum(predictions)

            accuracy_score = correct_predicted / total_examples
            print('---------->Accuracy on Test Examples:',accuracy_score)
            results_list.append(['Accuracy on Test Examples:',accuracy_score])
            # Save the evaluation results to a CSV file (create the directory if needed)
            if not os.path.exists('evaluate_results'):
                os.makedirs('evaluate_results')
            results_file = open('evaluate_results/evaluate_results.csv', 'w', newline='')
            csv_writer = csv.writer(results_file, dialect='excel')
            for row in results_list:
                csv_writer.writerow(row)

def main(argv=None):
    cifar_input.maybe_download_and_extract(data_dir=dataset_dir,data_url=cifar_data_url)
    train_dir = 'logs/'
    if tf.gfile.Exists(train_dir):
        tf.gfile.DeleteRecursively(train_dir)
    tf.gfile.MakeDirs(train_dir)
    TrainModel()

if __name__ =='__main__':
    tf.app.run()