Keras Basics I: The Sequential Model

Keras Introduction

Keras is a high-level API for deep learning. It is implemented in Python and runs on top of TensorFlow or Theano as a back end. Keras has also recently become the official high-level API of TensorFlow, so it integrates with TensorFlow particularly well. It supports rapid prototyping of networks such as CNNs and RNNs, with seamless switching between CPU and GPU. In addition, a Keras model can be converted directly into a Core ML model and deployed on iOS devices.

If you are familiar with the basic concepts of deep learning, Keras makes it easy to reproduce models quickly: you do not need to implement many layers yourself, so the barrier to entry is very low.

Sequential Model

The Sequential model is a linear stack of layers. You can build it either by passing a list of layers to the constructor or by adding layers one at a time with the add() method.

from keras.models import Sequential
from keras.layers import Dense, Activation

# add layers one at a time with add()
model = Sequential()
model.add(Dense(32, input_shape=(784,)))
model.add(Activation('relu'))
model.add(Dense(10))
model.add(Activation('softmax'))

After defining the model, you need to call the compile() method to configure the training process. It takes three arguments: an optimizer, a loss, and a list of metrics; both the loss and the metrics can be custom functions.

# for a multi-class classification problem
model.compile(optimizer='rmsprop',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# For a binary classification problem
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])

# For a mean squared error regression problem
model.compile(optimizer='rmsprop',
              loss='mse')


# for a custom metric
import keras.backend as K

def mean_pred(y_true,y_pred):
    return K.mean(y_pred)


model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy', mean_pred])
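
A loss can be customized in the same way as a metric: write a function of (y_true, y_pred) using backend ops and pass it to compile(). A minimal sketch (the name custom_mse is just an illustration, not from the original post):

import keras.backend as K

def custom_mse(y_true, y_pred):
    # mean squared error written by hand with backend ops
    return K.mean(K.square(y_pred - y_true), axis=-1)

model.compile(optimizer='rmsprop', loss=custom_mse)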

Once the model is defined and compiled, you can train it by passing data to the fit() or fit_generator() method.

model.fit(x_train,y_train,epochs=20,batch_size=128)
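
fit_generator() is useful when the data does not fit in memory: you hand it a Python generator that yields batches indefinitely, plus the number of batches that make up one epoch. A rough sketch, assuming x_train and y_train are NumPy arrays (batch_generator is a hypothetical helper, not from the original post):

import numpy as np

def batch_generator(x, y, batch_size=128):
    # yield shuffled (inputs, targets) batches forever, as fit_generator expects
    while True:
        idx = np.random.permutation(len(x))
        for start in range(0, len(x), batch_size):
            batch = idx[start:start + batch_size]
            yield x[batch], y[batch]

model.fit_generator(batch_generator(x_train, y_train, batch_size=128),
                    steps_per_epoch=len(x_train) // 128,
                    epochs=20)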

After training, you can call the evaluate() method to measure the trained model's performance on the test set.

score = model.evaluate(x_test,y_test,batch_size=128)

Let's see how this works in practice with a handwritten digit classification example (MNIST).

from keras.layers import Dense,Dropout
from keras import models
from keras.datasets import mnist
from keras.utils import to_categorical # convert int labels to one-hot vector
# define the model
model = models.Sequential()
model.add(Dense(128,activation="relu",input_dim=784))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(10, activation='softmax'))
# print model
model.summary()
_________________________________________________________________
Layer (type)                 Output Shape              Param #   
=================================================================
dense_79 (Dense)             (None, 128)               100480    
_________________________________________________________________
dropout_21 (Dropout)         (None, 128)               0         
_________________________________________________________________
dense_80 (Dense)             (None, 64)                8256      
_________________________________________________________________
dropout_22 (Dropout)         (None, 64)                0         
_________________________________________________________________
dense_81 (Dense)             (None, 10)                650       
=================================================================
Total params: 109,386
Trainable params: 109,386
Non-trainable params: 0
_________________________________________________________________
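
As a sanity check, the Param # column follows directly from the layer shapes: a Dense layer with n inputs and m units has n*m weights plus m biases, and Dropout adds no parameters.

# parameter counts of the three Dense layers above
print(784 * 128 + 128)  # 100480
print(128 * 64 + 64)    # 8256
print(64 * 10 + 10)     # 650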
# load data
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.astype('float32')/255 # normalize to 0~1
test_images = test_images.astype('float32')/255
train_images = train_images.reshape((60000,-1))
test_images = test_images.reshape((10000,-1))
# convert to one-hot vectors
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
# define training config
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

# train the model
model.fit(train_images,train_labels,epochs=5,batch_size=64)


# evaluate the model
test_loss,test_accuracy = model.evaluate(test_images,test_labels)

print("test loss:",test_loss)
print("test acc:",test_accuracy)
Epoch 1/5
60000/60000 [==============================] - 7s 113us/step - loss: 0.6265 - acc: 0.8106
Epoch 2/5
60000/60000 [==============================] - 5s 83us/step - loss: 0.3415 - acc: 0.9079
Epoch 3/5
60000/60000 [==============================] - 5s 82us/step - loss: 0.2935 - acc: 0.9228
Epoch 4/5
60000/60000 [==============================] - 5s 82us/step - loss: 0.2749 - acc: 0.9312
Epoch 5/5
60000/60000 [==============================] - 5s 84us/step - loss: 0.2656 - acc: 0.9356
10000/10000 [==============================] - 1s 128us/step
test loss: 0.1488323472943157
test acc: 0.9647

CNN

The same MNIST classification task can also be solved with a small convolutional network:

from keras.datasets import mnist
from keras.utils import to_categorical  # convert int labels to one-hot vectors
from keras import models, layers

# define model
model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.Flatten())
model.add(layers.Dense(64, activation='relu'))
model.add(layers.Dense(10, activation='softmax'))


# print model
# model.summary()

#load data
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape((60000, 28, 28, 1))
train_images = train_images.astype('float32') / 255  # normalize to 0~1

test_images = test_images.reshape((10000, 28, 28, 1))
test_images = test_images.astype('float32') / 255

# convert to one-hot vectors
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)

# define training config
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])

# train the model
model.fit(train_images, train_labels, epochs=5, batch_size=64)

# evaluate the model
test_loss, test_accuracy = model.evaluate(test_images, test_labels)
print("test loss:", test_loss)
print("test accuracy:", test_accuracy)
Epoch 1/5
60000/60000 [==============================] - 17s 276us/step - loss: 0.1867 - acc: 0.9410
Epoch 2/5
38592/60000 [==================>...........] - ETA: 3s - loss: 0.0514 - acc: 0.9841
