MLP
CNN 이전에 먼저 multi layer perceptron으로도 학습 가능
## model function
def create_model():
    """Build a simple MLP classifier for 128x128 RGB images (5 classes).

    Returns:
        An uncompiled ``keras.Sequential`` model.
    """
    # Fix: `keras` is never imported in this file as shown; import it
    # locally so the function is self-contained.
    from tensorflow import keras

    model = keras.Sequential()
    # Flatten 128*128*3 = 49,152 input values into one feature vector.
    model.add(keras.layers.Flatten(input_shape=(128, 128, 3)))
    model.add(keras.layers.Dense(512, activation='relu'))
    model.add(keras.layers.Dense(256, activation='relu'))
    model.add(keras.layers.Dense(128, activation='relu'))
    # 5-way softmax output — one unit per class.
    model.add(keras.layers.Dense(5, activation='softmax'))
    return model
GoogLeNet
## GoogLeNet
# Fix: the original import was split over two lines without parentheses
# or a backslash, which is a SyntaxError.
from tensorflow.keras.layers import (
    Input, Conv2D, Dense, MaxPool2D,
    GlobalAveragePooling2D, Concatenate, Dropout,
)

# GoogLeNet takes 224x224 RGB inputs.
IMG_SIZE = 224
def _inception(net, f1, f3r, f3, f5r, f5, fp):
    """One GoogLeNet inception module (four parallel branches, concatenated).

    Args:
        net: input feature map.
        f1:  filters of the 1x1 branch.
        f3r: 1x1 reduction filters before the 3x3 conv.
        f3:  filters of the 3x3 branch.
        f5r: 1x1 reduction filters before the 5x5 conv.
        f5:  filters of the 5x5 branch.
        fp:  1x1 projection filters after the 3x3 max-pool branch.
    """
    b1 = Conv2D(f1, 1, 1, 'SAME', activation='relu')(net)
    b2 = Conv2D(f3r, 1, 1, 'SAME', activation='relu')(net)
    b2 = Conv2D(f3, 3, 1, 'SAME', activation='relu')(b2)
    b3 = Conv2D(f5r, 1, 1, 'SAME', activation='relu')(net)
    b3 = Conv2D(f5, 5, 1, 'SAME', activation='relu')(b3)
    b4 = MaxPool2D(3, 1, 'SAME')(net)
    b4 = Conv2D(fp, 1, 1, 'SAME', activation='relu')(b4)
    return Concatenate()([b1, b2, b3, b4])


def create_model():
    """Build GoogLeNet (Inception v1) for 5-class classification.

    Returns:
        An uncompiled ``tf.keras.Model``.
    """
    # Fix: the original returned `keras.Model(...)` but `keras` itself is
    # never imported in this file — only tensorflow.keras.layers is.
    from tensorflow.keras import Model

    ## Stem
    inputs = Input(shape=(IMG_SIZE, IMG_SIZE, 3))
    net = Conv2D(64, 7, 2, 'SAME', activation='relu')(inputs)
    net = MaxPool2D(3, 2, 'SAME')(net)
    net = Conv2D(64, 1, 1, 'SAME', activation='relu')(net)
    net = Conv2D(192, 3, 1, 'SAME', activation='relu')(net)
    net = MaxPool2D(3, 2, 'SAME')(net)

    ## inception 3a, 3b
    net = _inception(net, 64, 96, 128, 16, 32, 32)
    net = _inception(net, 128, 128, 192, 32, 96, 64)
    net = MaxPool2D(3, 2, 'SAME')(net)

    ## inception 4a-4e
    net = _inception(net, 192, 96, 208, 16, 48, 64)
    net = _inception(net, 160, 112, 224, 24, 64, 64)
    net = _inception(net, 128, 128, 256, 24, 64, 64)
    net = _inception(net, 112, 144, 288, 32, 64, 64)
    net = _inception(net, 256, 160, 320, 32, 128, 128)
    net = MaxPool2D(3, 2, 'SAME')(net)

    ## inception 5a, 5b
    net = _inception(net, 256, 160, 320, 32, 128, 128)
    net = _inception(net, 384, 192, 384, 48, 128, 128)

    ## Classifier head: global average pooling, dropout, 5-way softmax.
    net = GlobalAveragePooling2D()(net)
    net = Dropout(0.4)(net)
    net = Dense(5, activation='softmax')(net)
    return Model(inputs=inputs, outputs=net)
MobileNet
## MobileNet
# Fix: the original import was split over two lines without parentheses
# or a backslash, which is a SyntaxError.
from tensorflow.keras.layers import (
    Conv2D, Dense, ReLU, Softmax, BatchNormalization,
    DepthwiseConv2D, GlobalAveragePooling2D,
)

# MobileNet takes 224x224 RGB inputs.
IMG_SIZE = 224
def _add_bn_relu(model, layer):
    """Append `layer` to `model`, followed by BatchNormalization and ReLU."""
    model.add(layer)
    model.add(BatchNormalization())
    model.add(ReLU())


def _add_depthwise_separable(model, filters, stride):
    """Append one depthwise-separable block: a 3x3 depthwise conv with the
    given stride, then a 1x1 pointwise conv to `filters` channels, each
    followed by BN + ReLU (convs are bias-free since BN follows)."""
    _add_bn_relu(model, DepthwiseConv2D(kernel_size=3, strides=stride,
                                        padding='SAME', use_bias=False))
    _add_bn_relu(model, Conv2D(filters=filters, kernel_size=1, use_bias=False))


def create_model():
    """Build MobileNet v1 for 5-class classification.

    Returns:
        An uncompiled ``keras.Sequential`` model.
    """
    # Fix: `keras` is never imported in this file as shown; import it
    # locally so the function is self-contained.
    from tensorflow import keras

    model = keras.Sequential()
    # Stem: full 3x3 conv, stride 2.
    _add_bn_relu(model, Conv2D(filters=32, kernel_size=3, strides=2,
                               padding='SAME', use_bias=False,
                               input_shape=(IMG_SIZE, IMG_SIZE, 3)))
    # (filters, stride) schedule of the 13 depthwise-separable blocks,
    # exactly as in the original hand-unrolled sequence.
    for filters, stride in [(64, 1),
                            (128, 2), (128, 1),
                            (256, 2), (256, 1),
                            (512, 2), (512, 1), (512, 1), (512, 1),
                            (512, 1), (512, 1),
                            (1024, 2), (1024, 1)]:
        _add_depthwise_separable(model, filters, stride)

    # Classifier head. NOTE(review): BatchNorm after the final Dense and
    # before Softmax is unusual but kept to preserve original behavior.
    model.add(GlobalAveragePooling2D())
    _add_bn_relu(model, Dense(units=128))
    model.add(Dense(units=5))
    model.add(BatchNormalization())
    model.add(Softmax())
    return model
Module: tf.keras.applications
'DEEP LEARNING > Tensorflow Training' 카테고리의 다른 글
cnn 실습_나만의 이미지 데이터를 만들고 학습하기(3) - functional API & save_model (0) | 2020.03.26 |
---|---|
cnn 실습_나만의 이미지 데이터를 만들고 학습하기(2) - sequential API (0) | 2020.03.21 |
cnn 실습_나만의 이미지 데이터를 만들고 학습하기(1) - image crawling (0) | 2020.03.20 |
Linear Regression & Logistic Regression (0) | 2020.03.01 |
Make A Dataset (0) | 2020.02.26 |