Neural Network Frameworks
PyTorch
import numpy as np
# PyTorch MNIST setup: build shuffled train/test DataLoaders.
import torch
import torch.nn as nn
import torchvision
# Mini-batch sizes for the training and evaluation loaders.
batch_size_train = 32
batch_size_test = 32
# Convert PIL images to float tensors in [0, 1]; no normalization applied.
transform = torchvision.transforms.Compose(
[torchvision.transforms.ToTensor()])
# NOTE(review): download=False assumes the MNIST files already exist under
# the current directory — confirm, otherwise dataset construction fails.
train_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('.', train=True, download=False,transform=transform),
batch_size=batch_size_train, shuffle=True)
test_loader = torch.utils.data.DataLoader(
torchvision.datasets.MNIST('.', train=False, download=False,transform=transform),
batch_size=batch_size_test, shuffle=True)
class MyModel(nn.Module):
    """CNN for MNIST: conv -> ReLU -> maxpool -> FC -> dropout -> FC (10 logits)."""

    def __init__(self):
        super(MyModel, self).__init__()
        # 1 input channel (grayscale), 28 filters, 3x3 kernel: 28x28 -> 26x26 maps.
        self.conv = nn.Conv2d(1, 28, kernel_size=3)
        # 2x2 pooling halves the spatial dims: 26x26 -> 13x13.
        self.maxpool = nn.MaxPool2d(2)
        self.fullcon1 = nn.Linear(28 * 13 * 13, 128)
        self.dropout = nn.Dropout(0.2)
        # Final layer emits raw logits; CrossEntropyLoss applies log-softmax itself.
        self.fullcon2 = nn.Linear(128, 10)
        self.act = nn.ReLU()

    def forward(self, x):
        x = self.act(self.conv(x))
        # BUG FIX: was `self.mqxpool(x)` (typo) — no such attribute exists, so the
        # first forward pass raised AttributeError.
        x = self.maxpool(x)
        x = x.view(x.size(0), -1)  # flatten to (batch, 28*13*13)
        x = self.act(self.fullcon1(x))
        x = self.dropout(x)
        x = self.fullcon2(x)
        return x
# Model, loss, and optimizer. CrossEntropyLoss expects raw logits and
# integer class labels.
model_pt = MyModel()
loss_object_pt = nn.CrossEntropyLoss()
optimizer_pt = torch.optim.Adam(model_pt.parameters())
# Per-epoch recorders: loss lists get one entry per batch; accuracy dicts
# count correct predictions and total samples seen.
train_loss = []
train_accuracy = {'correct':0,'total':0}
test_loss = []
test_accuracy = {'correct':0,'total':0}
def average(arg):
    """Return the mean of a loss list, or correct/total for an accuracy dict."""
    if isinstance(arg, dict):
        return arg['correct'] / arg['total']
    if isinstance(arg, list):
        return sum(arg) / len(arg)
def reset_loss_n_accuracy(*args):
    """Reset each recorder in place: loss lists are emptied, counter dicts zeroed.

    BUG FIX: the original rebound the loop variable (`arg = []`), which only
    replaced the local name and left the caller's list untouched — per-batch
    losses therefore accumulated across epochs and skewed the reported
    averages. Clearing the list in place fixes that.
    """
    for arg in args:
        if isinstance(arg, list):
            arg.clear()  # mutate the caller's list, don't rebind the local
        elif isinstance(arg, dict):
            for key in arg:
                arg[key] = 0
def compute_loss_n_accuracy(model, x, y, training, loss_recorder, accuracy_recorder):
    """Forward one batch and record its loss and correct-prediction counts.

    In training mode the optimizer's gradients are zeroed first and the
    returned loss still carries the autograd graph; in eval mode the forward
    pass runs under torch.no_grad().
    """
    if not training:
        with torch.no_grad():
            outputs = model(x)
    else:
        optimizer_pt.zero_grad()
        outputs = model(x)
    batch_loss = loss_object_pt(outputs, y)
    loss_recorder.append(batch_loss.item())
    # Predicted class = index of the max logit in each row.
    predicted = outputs.data.max(1, keepdim=True)[1]
    hits = predicted.eq(y.data.view_as(predicted)).sum().item()
    accuracy_recorder['correct'] += hits
    accuracy_recorder['total'] += len(predicted)
    return batch_loss
def show_loss_n_accuracy(epoch, train_loss, train_accuracy, test_loss, test_accuracy):
    """Print the one-line epoch summary: mean losses and accuracy percentages."""
    template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
    values = (
        epoch + 1,
        average(train_loss),
        100 * average(train_accuracy),
        average(test_loss),
        100 * average(test_accuracy),
    )
    print(template.format(*values))
def train_step(model, images, labels, train_loss, train_accuracy):
    """One optimization step: forward + record stats, backprop, optimizer update."""
    batch_loss = compute_loss_n_accuracy(
        model, images, labels, True, train_loss, train_accuracy)
    batch_loss.backward()
    optimizer_pt.step()
def test_step(model, images, labels, test_loss, test_accuracy):
    """Evaluate one batch; records loss/accuracy without any gradient updates."""
    compute_loss_n_accuracy(
        model, images, labels, False, test_loss, test_accuracy)
# Main loop: EPOCHS full passes over the training set, each followed by a
# complete evaluation pass and a printed summary.
EPOCHS = 5
for epoch in range(EPOCHS):
    # Zero the recorders so each epoch reports fresh statistics.
    reset_loss_n_accuracy(train_loss, train_accuracy, test_loss, test_accuracy)
    model_pt.train()  # enable dropout for training
    for batch_idx, (images, labels) in enumerate(train_loader):
        train_step(model_pt, images, labels, train_loss, train_accuracy)
    model_pt.eval()  # disable dropout for evaluation
    for test_images, test_labels in test_loader:
        test_step(model_pt, test_images, test_labels, test_loss, test_accuracy)
    show_loss_n_accuracy(epoch, train_loss, train_accuracy, test_loss, test_accuracy)
TensorFlow
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import Model
# Mini-batch sizes for the training and evaluation datasets.
batch_size_train = 32
batch_size_test = 32
# Load MNIST and scale pixel values into [0, 1].
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0
# Add a trailing channel axis: (N, 28, 28) -> (N, 28, 28, 1).
x_train = x_train[..., tf.newaxis].astype("float32")
x_test = x_test[..., tf.newaxis].astype("float32")
# Shuffle only the training set; the test set order does not matter.
train_ds = tf.data.Dataset.from_tensor_slices(
    (x_train, y_train)).shuffle(10000).batch(batch_size_train)
test_ds = tf.data.Dataset.from_tensor_slices((x_test, y_test)).batch(batch_size_test)
class MyModel(Model):
    """CNN for MNIST: conv -> maxpool -> flatten -> FC -> dropout -> FC (10 logits)."""

    def __init__(self):
        super(MyModel, self).__init__()
        # 28 filters, 3x3 kernel over 28x28x1 grayscale input.
        self.conv = layers.Conv2D(28, kernel_size=(3, 3), input_shape=(28, 28, 1))
        self.maxpool = layers.MaxPooling2D(pool_size=(2, 2))
        self.flatten = layers.Flatten()
        self.fullcon1 = layers.Dense(128, activation=tf.nn.relu)
        self.dropout = layers.Dropout(0.2)
        # BUG FIX: this layer used activation=tf.nn.softmax while the training
        # loss is SparseCategoricalCrossentropy(from_logits=True); feeding
        # softmax probabilities to a from_logits loss silently miscomputes the
        # loss. Emit raw logits instead — SparseCategoricalAccuracy's argmax
        # is unaffected.
        self.fullcon2 = layers.Dense(10)

    def call(self, x):
        x = self.conv(x)
        x = self.maxpool(x)
        x = self.flatten(x)
        x = self.fullcon1(x)
        x = self.dropout(x)
        x = self.fullcon2(x)
        return x
model_tf = MyModel()
# from_logits=True: the loss expects raw (unnormalized) scores plus integer
# class labels, and applies softmax internally.
loss_object_tf = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
optimizer_tf = tf.keras.optimizers.Adam()
# Streaming Keras metrics, reset at the start of every epoch.
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
test_loss = tf.keras.metrics.Mean(name='test_loss')
test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='test_accuracy')
def reset_loss_n_accuracy(*args):
    """Reset every Keras metric passed in, clearing its accumulated state."""
    for metric in args:
        metric.reset_states()
def compute_loss_n_accuracy(model, x, y, training, loss_recorder, accuracy_recorder):
    """Forward one batch, update the loss/accuracy metrics, return the loss tensor."""
    predictions = model(x, training=training)
    batch_loss = loss_object_tf(y_true=y, y_pred=predictions)
    loss_recorder.update_state(batch_loss)
    accuracy_recorder.update_state(y_true=y, y_pred=predictions)
    return batch_loss
def show_loss_n_accuracy(epoch, train_loss, train_accuracy, test_loss, test_accuracy):
    """Print the epoch summary built from the four streaming metrics."""
    template = 'Epoch {}, Loss: {}, Accuracy: {}, Test Loss: {}, Test Accuracy: {}'
    summary = template.format(
        epoch + 1,
        train_loss.result(),
        100 * train_accuracy.result(),
        test_loss.result(),
        100 * test_accuracy.result(),
    )
    print(summary)
def train_step(model, images, labels, train_loss, train_accuracy):
    """One optimization step: forward under GradientTape, then apply gradients."""
    with tf.GradientTape() as tape:
        batch_loss = compute_loss_n_accuracy(
            model, images, labels, True, train_loss, train_accuracy)
    grads = tape.gradient(batch_loss, model.trainable_variables)
    optimizer_tf.apply_gradients(zip(grads, model.trainable_variables))
def test_step(model, images, labels, test_loss, test_accuracy):
    """Evaluate one batch (training=False: no dropout, no gradient updates)."""
    compute_loss_n_accuracy(
        model, images, labels, False, test_loss, test_accuracy)
# Main loop: EPOCHS full passes over the training set, each followed by a
# complete evaluation pass and a printed summary.
EPOCHS = 5
for epoch in range(EPOCHS):
    # Clear the streaming metrics so each epoch reports fresh statistics.
    reset_loss_n_accuracy(train_loss, train_accuracy, test_loss, test_accuracy)
    for images, labels in train_ds:
        train_step(model_tf, images, labels, train_loss, train_accuracy)
    for test_images, test_labels in test_ds:
        test_step(model_tf, test_images, test_labels, test_loss, test_accuracy)
    show_loss_n_accuracy(epoch, train_loss, train_accuracy, test_loss, test_accuracy)