
Andrew Ng Deep Learning Exercise 4.2_Convolutional Neural Networks_Happy House & Residual Networks

1、Happy House

1.1、Load Dataset

1.2、Build the model graph: def HappyModel

1.3、Instantiate the model: happyModel = HappyModel((64,64,3))

1.4、Choose optimizer and loss: happyModel.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])

1.5、Train the model: happyModel.fit(x=X_train,y=Y_train,epochs=4,batch_size=16)

1.6、Evaluate: preds = happyModel.evaluate(x=X_test,y=Y_test)

Loss = preds[0], Accuracy = preds[1]

1.7、Test on your own image

2、Residual Networks

2.1、Load Dataset

2.2、Build the model graph

def identity_block, convolutional_block, ResNet50(input_shape=(64,64,3),classes=6)

Figure 1 : Identity block. Skip connection "skips over" 2 layers.

Figure 2 : Convolutional block

Figure 3 : ResNet-50 model

2.3、Instantiate the model: model = ResNet50(input_shape=(64,64,3),classes=6)

2.4、Choose optimizer and loss: model.compile(optimizer='adam',loss='categorical_crossentropy',metrics=['accuracy'])

2.5、Train the model: model.fit(X_train,Y_train,epochs=2,batch_size=32)

2.6、Evaluate: preds = model.evaluate(x=X_test,y=Y_test)

Loss = preds[0], Accuracy = preds[1]

2.7、Test on your own image

2.8、Print a summary of your model


1、Happy House

import numpy as np
from keras import layers
from keras.layers import Input,Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D
from keras.layers import AveragePooling2D, MaxPooling2D, Dropout, GlobalMaxPooling2D, GlobalAveragePooling2D
from keras.models import Model, load_model
from keras.preprocessing import image
from keras.utils import layer_utils
from keras.utils.data_utils import get_file
from keras.applications.imagenet_utils import preprocess_input
import pydot
from IPython.display import SVG
from keras.utils.vis_utils import model_to_dot
from keras.utils import plot_model
from kt_utils import *
from resnets_utils import *

import tensorflow as tf  # used below for the tf.Session tests of the ResNet blocks
import keras.backend as K
K.set_image_data_format('channels_last')
import matplotlib.pyplot as plt
from matplotlib.pyplot import imshow
from keras.initializers import glorot_uniform
import scipy.misc

%matplotlib inline

K.set_learning_phase(1)  # 1 = training phase (affects BatchNormalization and Dropout)

1.1、Load Dataset

X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

# Normalize image vectors
X_train = X_train_orig/255.
X_test = X_test_orig/255.

# Reshape
Y_train = Y_train_orig.T
Y_test = Y_test_orig.T

print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
number of training examples = 1080
number of test examples = 120
X_train shape: (1080, 64, 64, 3)
Y_train shape: (1080, 1)
X_test shape: (120, 64, 64, 3)
Y_test shape: (120, 1)
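
To sanity-check the data, it can help to display one training example (a small sketch, not in the original notebook; index 0 is arbitrary):

# Show the first training image together with its label
imshow(X_train[0])
print("y = " + str(Y_train[0]))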

1.2、Build the model graph: def HappyModel

def HappyModel(input_shape):
    # Input tensor of shape (64, 64, 3)
    X_input = Input(input_shape)
    # Pad the border with zeros
    X = ZeroPadding2D((3,3))(X_input)
    # CONV -> BN -> RELU block
    X = Conv2D(32,(7,7),strides=(1,1),name='conv0')(X)
    X = BatchNormalization(axis=3,name='bn0')(X)
    X = Activation('relu')(X)
    # MAXPOOL
    X = MaxPooling2D((2,2),name='max_pool')(X)
    # FLATTEN -> FULLYCONNECTED; a single sigmoid unit for binary classification
    X = Flatten()(X)
    X = Dense(1,activation='sigmoid',name='fc')(X)
    # Create the Keras model instance
    model = Model(inputs = X_input,outputs = X,name='HappyModel')
    
    return model

1.3、Instantiate the model: happyModel = HappyModel((64,64,3))

1.4、Choose optimizer and loss: happyModel.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])

1.5、Train the model: happyModel.fit(x=X_train,y=Y_train,epochs=4,batch_size=16)

1.6、Evaluate: preds = happyModel.evaluate(x=X_test,y=Y_test)

Loss = preds[0], Accuracy = preds[1]

happyModel = HappyModel((64,64,3))
happyModel.compile(optimizer='adam',loss='binary_crossentropy',metrics=['accuracy'])
happyModel.fit(x=X_train,y=Y_train,epochs=4,batch_size=16)
preds = happyModel.evaluate(x=X_test,y=Y_test)
print ('Loss = ',preds[0])
print ('Accuracy = ',preds[1])
Epoch 1/4
1080/1080 [==============================] - 11s 11ms/step - loss: -23.6072 - acc: 0.1704
Epoch 2/4
1080/1080 [==============================] - 6s 5ms/step - loss: -23.9136 - acc: 0.1667
Epoch 3/4
1080/1080 [==============================] - 6s 5ms/step - loss: -23.9136 - acc: 0.1667
Epoch 4/4
1080/1080 [==============================] - 6s 5ms/step - loss: -23.9136 - acc: 0.1667
120/120 [==============================] - 2s 14ms/step
Loss =  -23.91357816060384
Accuracy =  0.16666666666666666
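
Note: binary cross-entropy is non-negative for labels in {0, 1}, so the negative loss and the flat 16.7% accuracy above mean the labels fed to the model are not binary. Both kt_utils and resnets_utils export a load_dataset function, and the later "from resnets_utils import *" shadows the Happy House loader, so Y_train here most likely holds the 6-class SIGNS labels (0-5). A minimal sketch of a fix, assuming kt_utils defines the Happy House loader under the name load_dataset as in the course notebook:

# Bind the Happy House loader explicitly so the wildcard import from
# resnets_utils cannot shadow it (assumes kt_utils.load_dataset exists)
from kt_utils import load_dataset as load_happy_dataset

X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_happy_dataset()
X_train = X_train_orig/255.
X_test = X_test_orig/255.
Y_train = Y_train_orig.T   # binary happy / not-happy labels in {0, 1}
Y_test = Y_test_orig.T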

1.7、Test on your own image

img_path = 'images/image_yhd2.jpg'
# img_path = 'images/imag_phf1.jpg'
my_image = scipy.misc.imread(img_path)
imshow(my_image);
C:\conda\envs\tensorflow\lib\site-packages\ipykernel_launcher.py:3: DeprecationWarning: `imread` is deprecated!
`imread` is deprecated in SciPy 1.0.0, and will be removed in 1.2.0.
Use ``imageio.imread`` instead.
  This is separate from the ipykernel package so we can avoid doing imports until

[image output: the test photo displayed by imshow]

img = image.load_img(img_path, target_size=(64, 64))
x = image.img_to_array(img)
print (x.shape)

x = np.expand_dims(x, axis=0)
print (x.shape)

x = preprocess_input(x) 
print (x.shape)
'''
Keras's preprocess_input() performs the data preprocessing step; preprocessing
can improve how well the model runs. Common preprocessing includes
normalization and whitening; the imagenet_utils version applies the
ImageNet-style transform (RGB->BGR reordering and channel mean subtraction).
'''
print(happyModel.predict(x))
(64, 64, 3)
(1, 64, 64, 3)
(1, 64, 64, 3)
[[1.]]
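Since the final layer is a single sigmoid unit, predict returns a probability. A minimal sketch of turning it into a hard 0/1 decision (the 0.5 threshold is an assumption, not part of the original notebook):

# Threshold the sigmoid output: 1 = "happy", 0 = "not happy"
is_happy = int(happyModel.predict(x)[0, 0] > 0.5)
print(is_happy)
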
# test np.expand_dims(x,axis)
a = np.array([[1,2,3],[4,5,6]])
print (a)
print (a.shape)
print ('- '*20)
b = np.expand_dims(a,axis=0)
print (b)
print ("b.shape: ",b.shape)
print ('- '*20)
b = np.expand_dims(a,axis=1)  # note the difference between axis=0 and axis=1
print ( b )
print ("b.shape: ",b.shape)

[[1 2 3]
 [4 5 6]]
(2, 3)
- - - - - - - - - - - - - - - - - - - - 
[[[1 2 3]
  [4 5 6]]]
b.shape:  (1, 2, 3)
- - - - - - - - - - - - - - - - - - - - 
[[[1 2 3]]

 [[4 5 6]]]
b.shape:  (2, 1, 3)
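
For reference, indexing with np.newaxis adds the leading batch dimension in the same way (a small illustrative sketch):

b = a[np.newaxis, ...]   # equivalent to np.expand_dims(a, axis=0)
print(b.shape)           # (1, 2, 3)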

2、Residual Networks

2.1、Load Dataset

X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset()

# Normalize image vectors
X_train = X_train_orig/255.
X_test = X_test_orig/255.

# Convert training and test labels to one hot matrices
Y_train = convert_to_one_hot(Y_train_orig, 6).T
Y_test = convert_to_one_hot(Y_test_orig, 6).T

print ("number of training examples = " + str(X_train.shape[0]))
print ("number of test examples = " + str(X_test.shape[0]))
print ("X_train shape: " + str(X_train.shape))
print ("Y_train shape: " + str(Y_train.shape))
print ("X_test shape: " + str(X_test.shape))
print ("Y_test shape: " + str(Y_test.shape))
number of training examples = 1080
number of test examples = 120
X_train shape: (1080, 64, 64, 3)
Y_train shape: (1080, 6)
X_test shape: (120, 64, 64, 3)
Y_test shape: (120, 6)
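
convert_to_one_hot (from resnets_utils) turns each integer label into a length-6 indicator vector. The same idea in plain NumPy, as an illustrative sketch of what the one-hot rows look like:

labels = np.array([0, 2, 5])
one_hot = np.eye(6)[labels]   # row i is the indicator vector for labels[i]
print(one_hot)
# [[1. 0. 0. 0. 0. 0.]
#  [0. 0. 1. 0. 0. 0.]
#  [0. 0. 0. 0. 0. 1.]]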

2.2、Build the model graph

def identity_block, convolutional_block, ResNet50(input_shape=(64,64,3),classes=6)

def identity_block(X,f,filters,stage,block):
    # Number of filters in each conv layer of the main path
    F1,F2,F3 = filters
    # Layer-name bases following the ResNet naming convention
    res_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # Save the input for the shortcut path
    X_shortcut = X

    # Main path, first component: 1x1 conv
    X = Conv2D(filters=F1,kernel_size=(1,1),strides=(1,1),padding='valid',name= res_name_base + '2a',kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)
    
    # Main path, second component: fxf conv with 'same' padding
    X = Conv2D(F2,(f,f),strides=(1,1),padding='same',name= res_name_base + '2b',kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)
    
    # Main path, third component: 1x1 conv (no ReLU before the add)
    X = Conv2D(F3,(1,1),strides=(1,1),padding='valid',name= res_name_base + '2c',kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)

    # Add the shortcut to the main path, then apply the final ReLU
    X = Add()([X,X_shortcut])
    X = Activation('relu')(X)
    
    return X

tf.reset_default_graph()

with tf.Session() as test:
    np.random.seed(1)
    # A batch of 3 activations with shape (4, 4, 6)
    A_prev = tf.placeholder("float", [3, 4, 4, 6])
    X = np.random.randn(3, 4, 4, 6)
    A = identity_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')
    test.run(tf.global_variables_initializer())
    # K.learning_phase(): 0 runs BatchNormalization in inference mode
    out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
    print("out = " + str(out[0][1][1][0]))

out = [ 0.19716817 -0.          1.3561227   2.1713073  -0.          1.3324987 ]

def convolutional_block(X,f,filters,stage,block,s=2):
    # Number of filters in each conv layer of the main path
    F1,F2,F3 = filters
    res_name_base = 'res' + str(stage) + block + '_branch'
    bn_name_base = 'bn' + str(stage) + block + '_branch'
    # Save the input for the shortcut path
    X_shortcut = X

    # Main path, first component: 1x1 conv with stride s (downsamples)
    X = Conv2D(filters=F1,kernel_size=(1,1),strides=(s,s),padding='valid',name= res_name_base + '2a',kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2a')(X)
    X = Activation('relu')(X)
    
    # Main path, second component: fxf conv with 'same' padding
    X = Conv2D(F2,(f,f),strides=(1,1),padding='same',name= res_name_base + '2b',kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2b')(X)
    X = Activation('relu')(X)
    
    # Main path, third component: 1x1 conv
    X = Conv2D(F3,(1,1),strides=(1,1),padding='valid',name= res_name_base + '2c',kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3, name=bn_name_base + '2c')(X)
    
    # Shortcut path: 1x1 conv with stride s so its shape matches the main path
    X_shortcut = Conv2D(F3,(1,1),strides=(s,s),padding='valid',name= res_name_base + '1',kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
    X_shortcut = BatchNormalization(axis=3, name=bn_name_base + '1')(X_shortcut)
    
    # Add the shortcut to the main path, then apply the final ReLU
    X = Add()([X,X_shortcut])
    X = Activation('relu')(X)
    
    return X

tf.reset_default_graph()

with tf.Session() as test:
    np.random.seed(1)
    # A batch of 3 activations with shape (4, 4, 6)
    A_prev = tf.placeholder("float", [3, 4, 4, 6])
    X = np.random.randn(3, 4, 4, 6)
    A = convolutional_block(A_prev, f = 2, filters = [2, 4, 6], stage = 1, block = 'a')
    test.run(tf.global_variables_initializer())
    # K.learning_phase(): 0 runs BatchNormalization in inference mode
    out = test.run([A], feed_dict={A_prev: X, K.learning_phase(): 0})
    print("out = " + str(out[0][1][1][0]))
out = [ 0.09018461  1.2348979   0.46822017  0.03671762 -0.          0.65516603]

def ResNet50(input_shape = (64, 64, 3), classes = 6):
    X_input = Input(input_shape)
    X = ZeroPadding2D((3, 3))(X_input)
    # Stage 1: CONV -> BN -> RELU -> MAXPOOL
    X = Conv2D(64,(7,7),strides=(2,2),padding='valid',name='con0',kernel_initializer=glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis=3,name='bn0')(X)
    X = Activation('relu')(X)
    X = MaxPooling2D(pool_size=(3,3),strides=(2,2))(X)
    # Stage 2
    X = convolutional_block(X, f = 3, filters = [64, 64, 256], stage = 2, block = 'a', s = 1)
    X = identity_block(X, f = 3, filters = [64, 64, 256], stage = 2, block = 'b')
    X = identity_block(X, f = 3, filters = [64, 64, 256], stage = 2, block = 'c')
    # Stage 3
    X = convolutional_block(X, f = 3, filters = [128, 128, 512], stage = 3, block = 'a', s = 2)
    X = identity_block(X, f = 3, filters = [128, 128, 512], stage = 3, block = 'b')
    X = identity_block(X, f = 3, filters = [128, 128, 512], stage = 3, block = 'c')
    X = identity_block(X, f = 3, filters = [128, 128, 512], stage = 3, block = 'd')
    # Stage 4
    X = convolutional_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block = 'a', s = 2)
    X = identity_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block = 'b')
    X = identity_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block = 'c')
    X = identity_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block = 'd')
    X = identity_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block = 'e')
    X = identity_block(X, f = 3, filters = [256, 256, 1024], stage = 4, block = 'f')
    # Stage 5
    X = convolutional_block(X, f = 3, filters = [512, 512, 2048], stage = 5, block = 'a', s = 2)
    X = identity_block(X, f = 3, filters = [512, 512, 2048], stage = 5, block = 'b')
    X = identity_block(X, f = 3, filters = [512, 512, 2048], stage = 5, block = 'c')
    # Average pooling, flatten, softmax classifier
    X = AveragePooling2D((2,2), name='avg_pool')(X)
    X = Flatten()(X)
    X = Dense(classes, activation='softmax', name='fc' + str(classes), kernel_initializer=glorot_uniform(seed=0))(X)
    model = Model(inputs = X_input, outputs = X, name='ResNet50')
    return model
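
Following the outline (steps 2.3 to 2.6 and 2.8), the remaining workflow mirrors the Happy House section. A minimal sketch, with the epoch count and batch size taken from the outline (the resulting loss and accuracy depend on training and are not reproduced here):

model = ResNet50(input_shape=(64,64,3), classes=6)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, Y_train, epochs=2, batch_size=32)
preds = model.evaluate(x=X_test, y=Y_test)
print('Loss = ', preds[0])
print('Accuracy = ', preds[1])

# 2.8: print a summary of the model's layers and parameter counts
model.summary()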