# Keras is a Python deep-learning framework, authored by Francois Chollet,
# that provides a convenient way to define and train deep-learning models —
# or, more simply:
#
#     "Keras: an API for specifying & training differentiable programs" [1]
#
# Keras does not execute low-level operations itself. Instead it relies on a
# specialized, well-optimized tensor library (e.g. TensorFlow) to handle
# operations such as tensor manipulation and differentiation; that library
# serves as the backend engine of Keras. Keras handles this in a modular way.
# ---------------------------------------------------------------------------
# Imports
# ---------------------------------------------------------------------------
from keras import backend as BackEnd # the module that allows us to manipulate our Keras backend
import os # library that will give us system access to the keras backend file
from importlib import reload # the library that will we will use reload a function
# ---------------------------------------------------------------------------
# Switching to CNTK [3] (the Microsoft Cognitive Toolkit) as our backend.
# ---------------------------------------------------------------------------
# Select the CNTK backend via the KERAS_BACKEND environment variable, then
# reload the backend module so the switch takes effect in this session.
os.environ.update(KERAS_BACKEND="cntk")
reload(BackEnd)
# ---------------------------------------------------------------------------
# Switching to Theano [4] (developed by the MILA lab at Université de
# Montréal) as our backend.
# ---------------------------------------------------------------------------
# Select the Theano backend via the KERAS_BACKEND environment variable, then
# reload the backend module so the switch takes effect in this session.
os.environ.update(KERAS_BACKEND="theano")
reload(BackEnd)
"""
Let's make life easier by making a function;
this function will let us just use one line to change out backend
"""
def set_keras_backend(backend):
    """Switch the active Keras backend to ``backend`` and reload it.

    Parameters
    ----------
    backend : str
        Name of the backend framework, e.g. "tensorflow", "theano", "cntk".

    Raises
    ------
    RuntimeError
        If the backend did not change after the reload.
    """
    if BackEnd.backend() != backend:
        os.environ['KERAS_BACKEND'] = backend
        reload(BackEnd)
        # A bare ``assert`` would be silently stripped under ``python -O``,
        # so verify the switch with an explicit check instead.
        if BackEnd.backend() != backend:
            raise RuntimeError(
                "Failed to switch Keras backend to %r" % backend)


# Use TensorFlow as the backend for the rest of this script.
set_keras_backend("tensorflow")
import keras
from keras import layers
import numpy as np  # for the synthetic training data below

# Sequential API: a linear stack of layers.
model = keras.Sequential()
model.add(layers.Dense(20, activation="relu", input_shape=(10,)))
model.add(layers.Dense(20, activation="relu"))
model.add(layers.Dense(10, activation="softmax"))

# BUG FIX: the original called fit() on undefined ``x``/``y`` and never
# compiled the model. Compile it and build a small synthetic data set so
# the example actually runs end to end.
model.compile(optimizer="rmsprop", loss="categorical_crossentropy")
x = np.random.random((100, 10)).astype("float32")
y = np.random.random((100, 10)).astype("float32")
model.fit(x, y, epochs=10, batch_size=32)
import keras
from keras import layers
import numpy as np  # for the synthetic training data below

# Functional API: explicitly wire tensors from Input to output.
inputs = keras.Input(shape=(10,))
# BUG FIX: the first Dense layer must be applied to ``inputs`` — the
# original applied it to the still-undefined ``x``.
h = layers.Dense(20, activation="relu")(inputs)
h = layers.Dense(20, activation="relu")(h)
outputs = layers.Dense(10, activation="softmax")(h)
model = keras.Model(inputs, outputs)

# BUG FIX: compile before fitting, and supply concrete training data
# instead of the undefined ``x``/``y`` of the original.
model.compile(optimizer="rmsprop", loss="categorical_crossentropy")
x = np.random.random((100, 10)).astype("float32")
y = np.random.random((100, 10)).astype("float32")
model.fit(x, y, epochs=10, batch_size=32)
import keras
from keras import layers
import numpy as np  # for the synthetic training data below


class MyModel(keras.Model):
    """Model-subclassing API: declare layers in __init__, wire them in call()."""

    def __init__(self):  # BUG FIX: original read ``def__init__`` (no space)
        super(MyModel, self).__init__()
        self.dense1 = layers.Dense(20, activation="relu")
        self.dense2 = layers.Dense(20, activation="relu")
        self.dense3 = layers.Dense(10, activation="softmax")

    def call(self, inputs):
        # BUG FIX: the first layer must consume ``inputs`` — the original
        # referenced ``x`` before it was ever assigned.
        x = self.dense1(inputs)
        x = self.dense2(x)
        return self.dense3(x)


model = MyModel()
# BUG FIX: the original passed a bare, undefined ``epochs`` positionally and
# never compiled the model; compile it, bind epochs explicitly, and build
# synthetic data so the example runs.
model.compile(optimizer="rmsprop", loss="categorical_crossentropy")
x = np.random.random((100, 10)).astype("float32")
y = np.random.random((100, 10)).astype("float32")
model.fit(x, y, epochs=10, batch_size=32)
# MNIST ships with Keras as NumPy arrays.
from keras.datasets import mnist

# Load the training and test set images along with their labels.
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()

# BUG FIX: the bare expressions of the original (e.g. ``train_images.shape``)
# only display anything in a REPL/notebook; in a script they are no-ops.
# Print them so the shapes and label formats are actually shown.
print(train_images.shape)  # (60000, 28, 28): 60k images of 28x28 pixels
print(len(train_labels))   # 60000 training labels
print(train_labels)        # uint8 array of digit classes 0-9
print(test_images.shape)   # (10000, 28, 28)
print(len(test_labels))    # 10000 test labels
print(test_labels)         # same stored form/format as the training labels
from keras import models
from keras import layers

# The same two-layer classifier, built by handing Sequential the full layer
# list instead of repeated add() calls:
#   - a dense layer of 512 ReLU units over the flattened 28*28-pixel input
#   - a 10-way softmax head returning one probability score per digit class
#     (the 10 scores sum to 1)
network = models.Sequential([
    layers.Dense(512, activation='relu', input_shape=(28 * 28,)),
    layers.Dense(10, activation='softmax'),
])
# To make the network ready for training we need three more things:
#   - A loss function: how the network measures its performance on the
#     training data, and thus how it steers itself in the right direction.
#   - An optimizer: the mechanism through which the network updates itself
#     based on the data it sees and its loss function.
#   - Metrics to monitor during training and testing: here we only care
#     about accuracy (the fraction of images correctly classified).
# Compilation step: rmsprop optimizer, categorical cross-entropy loss, and
# classification accuracy reported during training and evaluation.
network.compile(
    optimizer='rmsprop',
    loss='categorical_crossentropy',
    metrics=['accuracy'],
)
# Before training, we preprocess the data by reshaping it into the shape the
# network expects and scaling it so that all values fall in the [0, 1]
# interval. Previously, the training images were stored in a (60000, 28, 28)
# uint8 array with values in the [0, 255] interval.
# Flatten each 28x28 uint8 image into a float32 vector and rescale pixel
# values from [0, 255] down to [0, 1] (a simple form of data normalisation).
# Using ``-1`` for the first reshape dimension instead of the hard-coded
# 60000/10000 sample counts lets NumPy infer the batch size, so the same
# code works for any number of images.
train_images = train_images.reshape((-1, 28 * 28))
train_images = train_images.astype('float32') / 255
test_images = test_images.reshape((-1, 28 * 28))
test_images = test_images.astype('float32') / 255
# Link the labels to the network's output format: one-hot ("categorical")
# encode the integer digit labels so each becomes a length-10 vector that
# matches the 10-way softmax output.
from keras.utils import to_categorical
# Categorically encode both the training and the test labels.
train_labels = to_categorical(train_labels)
test_labels = to_categorical(test_labels)
# Train the network — in Keras this is a single call to the network's fit()
# method: we fit the model to its training data. Holding out 10% of the
# training data (validation_split) lets us watch generalisation per epoch.
# (A superseded, commented-out fit() call was removed here.)
network.fit(train_images, train_labels, batch_size=128, epochs=10,
            verbose=1, validation_split=0.1)

# Now check that the model also performs well on the held-out test set.
test_loss, test_acc = network.evaluate(test_images, test_labels)
print('test_acc:', test_acc)