Keras Learning Notes
Installing Keras
The following packages are required:
numpy
scipy
Checking the Keras backend
Run the following in a terminal:
$ python
Python 2.7.10 (default, Jul 30 2016, 18:31:42)
[GCC 4.2.1 Compatible Apple LLVM 8.0.0 (clang-800.0.34)] on darwin
Type "help", "copyright", "credits" or "license" for more information.
>>> import keras
Using TensorFlow backend.
When you import keras, it prints which backend is in use.
Temporarily changing the Keras backend
import os
os.environ['KERAS_BACKEND']='theano'
import keras
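To change the backend permanently rather than per-session, Keras reads the config file ~/.keras/keras.json at import time; editing its "backend" field should do it (the surrounding fields below follow the Keras 1.x layout and may differ in other versions):
{
    "image_dim_ordering": "tf",
    "epsilon": 1e-07,
    "floatx": "float32",
    "backend": "theano"
}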
Regression with Keras
#coding=utf-8
import keras
import numpy as np
from keras.models import Sequential # a model whose layers are stacked sequentially
from keras.layers import Dense # fully connected layer
import matplotlib.pyplot as plt
# create the data (200 points in total)
np.random.seed(1337)
X = np.linspace(-1,1,200)
np.random.shuffle(X)
Y = 0.5 * X + 2 + np.random.normal(0, 0.05, (200,))
# plot the data
plt.scatter(X,Y)
plt.show()
# split into training and testing data
X_train, Y_train = X[:160], Y[:160] # first 160 points as training data
X_test, Y_test = X[160:], Y[160:] # remaining 40 points (indices 160-199) as testing data
# build the neural network from the first layer to the last
model = Sequential()
model.add(Dense(output_dim=1, input_dim=1)) # add a layer, specifying its output and input dimensions
# Only the first layer needs input_dim; later layers infer it, because each layer's input is the previous layer's output
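# For example (illustrative only, not used in this script):
# model.add(Dense(output_dim=10, input_dim=1)) # first layer: input_dim is required
# model.add(Dense(output_dim=1))               # later layers infer their input size automatically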
# assemble the model
# mse = mean squared error
# sgd = stochastic gradient descent
# explanation: http://blog.bryanbigdata.com/2014/11/algorithm-stochastic-gradient.html
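# SGD update rule per batch (sketch): w <- w - learning_rate * dLoss/dw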
model.compile(loss='mse',optimizer='sgd')
# training
print "start training"
for step in range(301):
    cost = model.train_on_batch(X_train, Y_train) # one gradient update over the whole training set
    if step % 100 == 0:
        print "train cost: {}".format(cost)
# testing
print "start testing"
cost = model.evaluate(X_test, Y_test, batch_size=40)
print "test cost: {}".format(cost)
W, b = model.layers[0].get_weights()
print "Weights = {}, bias = {}".format(W,b)
# plot the test predictions
Y_pred = model.predict(X_test) # predicted Y
plt.scatter(X_test, Y_test)
plt.plot(X_test, Y_pred)
plt.show()
Classification with Keras
# coding=utf-8
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation
from keras.optimizers import RMSprop
# downloads mnist to '~/.keras/datasets/' the first time it is called
# X_train shape (60000, 28, 28), y_train shape (60000,); the test set has 10000 samples
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# data pre-processing
X_train = X_train.reshape(X_train.shape[0], -1) / 255. # normalize
X_test = X_test.reshape(X_test.shape[0], -1) / 255. # normalize
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)
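# to_categorical one-hot encodes each label, e.g. 3 -> [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]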
# Another way to build your neural net
# 28x28 = 784 pixels
# a two-layer neural network
model = Sequential([
    Dense(32, input_dim=784),
    Activation('relu'),
    Dense(10),
    Activation('softmax'),
])
# Another way to define your optimizer
# lr = learning rate
rmsprop = RMSprop(lr=0.001, rho=0.9, epsilon=1e-08, decay=0.0)
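# rho is the decay factor for the moving average of squared gradients; epsilon is a small
# constant for numerical stability; decay applies per-update learning-rate decay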
# We add metrics to get more results you want to see
model.compile(optimizer=rmsprop,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
print "Training ------------"
# Another way to train the model
model.fit(X_train, y_train, nb_epoch=2, batch_size=32)
# nb_epoch sets how many full passes are made over the training data
print "\nTesting ------------"
# Evaluate the model with the metrics we defined earlier
loss, accuracy = model.evaluate(X_test, y_test)
print "test loss: {}".format(loss)
print "test accuracy: {}".format(accuracy)
CNN with Keras
#coding=utf-8
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Activation, Convolution2D, MaxPooling2D, Flatten
from keras.optimizers import Adam
# downloads mnist to '~/.keras/datasets/' the first time it is called
# X_train shape (60000, 28, 28), y_train shape (60000,); the test set has 10000 samples
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# data pre-processing
X_train = X_train.reshape(-1, 1,28, 28)/255.
X_test = X_test.reshape(-1, 1,28, 28)/255.
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)
# Another way to build your CNN
model = Sequential()
# Conv layer 1 output shape (32, 28, 28)
model.add(Convolution2D(
    nb_filter=32,
    nb_row=5,
    nb_col=5,
    border_mode='same', # padding method
    dim_ordering='th', # use theano-style ("th") channels-first ordering even on the tensorflow backend; you can change it
    input_shape=(1, # channels
                 28, 28,) # height & width
))
model.add(Activation('relu'))
# Pooling layer 1 (max pooling) output shape (32, 14, 14)
model.add(MaxPooling2D(
    pool_size=(2, 2),
    strides=(2, 2), # how far the pooling window moves each step
    border_mode='same', # padding method
))
# Convolution layer 2 output shape (64, 14, 14)
model.add(Convolution2D(64, 5, 5, border_mode='same'))
model.add(Activation('relu'))
# Pooling layer 2 (max pooling) output shape (64, 7, 7)
model.add(MaxPooling2D(pool_size=(2, 2), border_mode='same'))
# Fully connected layer 1 input shape (64 * 7 * 7) = (3136), output shape (1024)
model.add(Flatten()) # flatten the 3D feature maps into a 1D vector
model.add(Dense(1024))
model.add(Activation('relu'))
# Fully connected layer 2 to shape (10) for 10 classes
model.add(Dense(10))
model.add(Activation('softmax'))
# Another way to define your optimizer
adam = Adam(lr=1e-4) # lr = learning rate
# We add metrics to get more results you want to see
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
print "Training ------------"
# Another way to train the model
model.fit(X_train, y_train, nb_epoch=1, batch_size=32,)
print "\nTesting ------------"
# Evaluate the model with the metrics we defined earlier
loss, accuracy = model.evaluate(X_test, y_test)
print "\ntest loss: {}".format(loss)
print "\ntest accuracy: {}".format(accuracy)
RNN classification with Keras
#coding=utf-8
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import SimpleRNN, Activation, Dense
from keras.optimizers import Adam
TIME_STEPS = 28 # same as the image height: how many time steps (rows) to read per image
INPUT_SIZE = 28 # same as the image width: how many pixels are read at each step (one row)
BATCH_SIZE = 50 # how many images per training batch
BATCH_INDEX = 0
OUTPUT_SIZE = 10 # one of 10 possible outputs (digits 0-9) per image
CELL_SIZE = 50 # number of hidden units in the RNN cell
LR = 0.001 # learning rate
# downloads mnist to '~/.keras/datasets/' the first time it is called
# X_train shape (60000, 28, 28), y_train shape (60000,); the test set has 10000 samples
(X_train, y_train), (X_test, y_test) = mnist.load_data()
# data pre-processing
X_train = X_train.reshape(-1, 28, 28) / 255. # normalize pixel values into the range 0-1
X_test = X_test.reshape(-1, 28, 28) / 255. # normalize
y_train = np_utils.to_categorical(y_train, num_classes=10)
y_test = np_utils.to_categorical(y_test, num_classes=10)
# build RNN model
model = Sequential()
# RNN cell
model.add(SimpleRNN(
    # for batch_input_shape, if using tensorflow as the backend, we have to put None for the batch_size.
    # Otherwise, model.evaluate() will raise an error.
    batch_input_shape=(None, TIME_STEPS, INPUT_SIZE), # Or: input_dim=INPUT_SIZE, input_length=TIME_STEPS,
    output_dim=CELL_SIZE,
    unroll=True,
))
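# unroll=True unrolls the recurrence into a feed-forward graph, which can speed up
# training on short sequences at the cost of extra memory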
# output layer
model.add(Dense(OUTPUT_SIZE))
model.add(Activation('softmax'))
# optimizer
adam = Adam(LR)
model.compile(optimizer=adam,
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# training
for step in range(4001):
    # data shape = (batch_num, steps, inputs/outputs)
    X_batch = X_train[BATCH_INDEX: BATCH_INDEX+BATCH_SIZE, :, :]
    Y_batch = y_train[BATCH_INDEX: BATCH_INDEX+BATCH_SIZE, :]
    cost = model.train_on_batch(X_batch, Y_batch)
    BATCH_INDEX += BATCH_SIZE
    BATCH_INDEX = 0 if BATCH_INDEX >= X_train.shape[0] else BATCH_INDEX # wrap around once the training set is exhausted
    if step % 500 == 0:
        cost, accuracy = model.evaluate(X_test, y_test, batch_size=y_test.shape[0], verbose=False)
        print "test cost: {} test accuracy: {}".format(cost, accuracy)
RNN LSTM regression with Keras
#coding=utf-8
import numpy as np
np.random.seed(1337) # for reproducibility
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import LSTM, TimeDistributed, Dense
from keras.optimizers import Adam
BATCH_START = 0
TIME_STEPS = 20
BATCH_SIZE = 50
INPUT_SIZE = 1
OUTPUT_SIZE = 1
CELL_SIZE = 20
LR = 0.006
def get_batch():
    global BATCH_START, TIME_STEPS
    # xs shape (50 batch, 20 steps)
    xs = np.arange(BATCH_START, BATCH_START+TIME_STEPS*BATCH_SIZE).reshape((BATCH_SIZE, TIME_STEPS)) / (10*np.pi)
    seq = np.sin(xs)
    res = np.cos(xs)
    BATCH_START += TIME_STEPS
    # plt.plot(xs[0, :], res[0, :], 'r', xs[0, :], seq[0, :], 'b--')
    # plt.show()
    return [seq[:, :, np.newaxis], res[:, :, np.newaxis], xs]
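# seq and res are returned with shape (50, 20, 1): (batch, time steps, features)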
model = Sequential()
# build a LSTM RNN
model.add(LSTM(
    batch_input_shape=(BATCH_SIZE, TIME_STEPS, INPUT_SIZE), # Or: input_dim=INPUT_SIZE, input_length=TIME_STEPS,
    output_dim=CELL_SIZE,
    return_sequences=True, # True: output at every time step; False: output only at the last step
    stateful=True, # True: the final state of batch 1 is fed in as the initial state of batch 2 (state carries across batches)
))
# add output layer
model.add(TimeDistributed(Dense(OUTPUT_SIZE))) # apply the same Dense layer to the output of every time step
adam = Adam(LR) # LR = learning rate, set to 0.006 above
model.compile(optimizer=adam,
              loss='mse')
print "Training ------------"
for step in range(501):
    # data shape = (batch_num, steps, inputs/outputs)
    X_batch, Y_batch, xs = get_batch()
    cost = model.train_on_batch(X_batch, Y_batch)
    pred = model.predict(X_batch, BATCH_SIZE)
    plt.plot(xs[0, :], Y_batch[0].flatten(), 'r', xs[0, :], pred.flatten()[:TIME_STEPS], 'b--')
    plt.ylim((-1.2, 1.2))
    plt.draw()
    plt.pause(0.1)
    if step % 10 == 0:
        print "train cost: {}".format(cost)
Autoencoder with Keras
#coding=utf-8
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.datasets import mnist
from keras.models import Model
from keras.layers import Dense, Input
import matplotlib.pyplot as plt
# downloads mnist to '~/.keras/datasets/' the first time it is called
# x_train shape (60000, 28, 28), y_train shape (60000,); the test set has 10000 samples
(x_train, _), (x_test, y_test) = mnist.load_data()
# data pre-processing
x_train = x_train.astype('float32') / 255. - 0.5 # min-max normalized to [-0.5, 0.5]
x_test = x_test.astype('float32') / 255. - 0.5 # min-max normalized to [-0.5, 0.5]
x_train = x_train.reshape((x_train.shape[0], -1))
x_test = x_test.reshape((x_test.shape[0], -1))
print(x_train.shape)
print(x_test.shape)
# in order to plot in a 2D figure
encoding_dim = 2 # the encoder's goal is to compress each image down to 2 features
# this is our input placeholder
input_img = Input(shape=(784,)) # the original input image is 28x28 = 784 pixels
# encoder layers
encoded = Dense(128, activation='relu')(input_img) # compress the 784 inputs into 128 features
encoded = Dense(64, activation='relu')(encoded) # compress the 128 features into 64
encoded = Dense(10, activation='relu')(encoded) # compress the 64 features into 10
encoder_output = Dense(encoding_dim)(encoded) # finally compress the remaining 10 features into 2
# decoder layers (the encoder structure mirrored)
decoded = Dense(10, activation='relu')(encoder_output) # decode the 2 features back into 10
decoded = Dense(64, activation='relu')(decoded) # decode the 10 features into 64
decoded = Dense(128, activation='relu')(decoded) # decode the 64 features into 128
decoded = Dense(784, activation='tanh')(decoded) # finally decode the 128 features back into the original 784 pixels
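# tanh outputs values in (-1, 1), which covers the normalized pixel range of [-0.5, 0.5]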
# construct the autoencoder model
autoencoder = Model(input=input_img, output=decoded)
# construct the encoder model for plotting
encoder = Model(input=input_img, output=encoder_output)
# compile autoencoder
autoencoder.compile(optimizer='adam', loss='mse')
# training
autoencoder.fit(x_train, x_train,
                nb_epoch=20,
                batch_size=256,
                shuffle=True)
# plotting
encoded_imgs = encoder.predict(x_test)
plt.scatter(encoded_imgs[:, 0], encoded_imgs[:, 1], c=y_test)
plt.colorbar()
plt.show()
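As a further check (a sketch: decoded_imgs and n are names introduced here, not from the original notes), the full autoencoder can reconstruct test images for a visual side-by-side comparison:
# sketch: compare a few original test images with their reconstructions
decoded_imgs = autoencoder.predict(x_test)
n = 5
for i in range(n):
    plt.subplot(2, n, i + 1)
    plt.imshow(x_test[i].reshape(28, 28), cmap='gray') # original (top row)
    plt.subplot(2, n, n + i + 1)
    plt.imshow(decoded_imgs[i].reshape(28, 28), cmap='gray') # reconstruction (bottom row)
plt.show()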
Saving and restoring a trained model
This requires the HDF5 / h5py packages.
Here is how I installed them on macOS Sierra (pip3 for Python 3, pip for Python 2):
brew install hdf5
sudo pip3 install h5py
sudo pip install h5py
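To verify the install, the following should print the h5py version:
python -c "import h5py; print(h5py.__version__)"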
# coding=utf-8
import numpy as np
np.random.seed(1337) # for reproducibility
from keras.models import Sequential
from keras.layers import Dense
from keras.models import load_model
# create some data
X = np.linspace(-1, 1, 200) # create 200 data points
np.random.shuffle(X) # randomize the data
Y = 0.5 * X + 2 + np.random.normal(0, 0.05, (200, ))
X_train, Y_train = X[:160], Y[:160] # first 160 data points
X_test, Y_test = X[160:], Y[160:] # last 40 data points
model = Sequential()
model.add(Dense(output_dim=1, input_dim=1))
model.compile(loss='mse', optimizer='sgd')
for step in range(301):
    cost = model.train_on_batch(X_train, Y_train)
# save
print "test before save: {}".format(model.predict(X_test[0:2]))
model.save('my_model.h5') # save the whole model as an HDF5 file (requires the h5py package installed above)
del model # deletes the existing model
# load
model = load_model('my_model.h5')
print "test after load: {}".format(model.predict(X_test[0:2]))
# to keep only the weights without saving the architecture, use the following:
"""
# save and load weights
model.save_weights('my_model_weights.h5')
model.load_weights('my_model_weights.h5')
# save and load fresh network without trained weights
from keras.models import model_from_json
json_string = model.to_json()
model = model_from_json(json_string)
"""