import torch

from utils.nn_reader import read_csv

class Feedforward(torch.nn.Module):
    """Two-layer feedforward classifier: Linear -> ReLU -> Linear."""

    def __init__(self, input_size, hidden_size, output_size):
        super(Feedforward, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.fc1 = torch.nn.Linear(self.input_size, self.hidden_size)
        self.relu = torch.nn.ReLU()
        self.fc2 = torch.nn.Linear(self.hidden_size, self.output_size)
        self.sigmoid = torch.nn.Sigmoid()  # defined but not used in forward()
        # Softmax is only needed to turn logits into probabilities at inference
        # time; it is not applied in forward() because CrossEntropyLoss already
        # applies log-softmax internally.
        self.softmax = torch.nn.Softmax(dim=1)

    def forward(self, x):
        hidden = self.fc1(x)
        relu = self.relu(hidden)
        output = self.fc2(relu)
        # Return raw logits; the softmax is handled by the loss function.
        return output


"""
|
|
from sklearn.datasets import make_blobs
|
|
def blob_label(y, label, loc): # assign labels
|
|
target = numpy.copy(y)
|
|
for l in loc:
|
|
target[y == l] = label
|
|
return target
|
|
X_train, y_train = make_blobs(n_samples=40, n_features=2, cluster_std=1.5, shuffle=True)
|
|
X_train = torch.FloatTensor(X_train)
|
|
y_train = torch.FloatTensor(blob_label(y_train, 0, [0]))
|
|
y_train = torch.FloatTensor(blob_label(y_train, 1, [1,2,3]))
|
|
x_test, y_test = make_blobs(n_samples=10, n_features=2, cluster_std=1.5, shuffle=True)
|
|
x_test = torch.FloatTensor(x_test)
|
|
y_test = torch.FloatTensor(blob_label(y_test, 0, [0]))
|
|
y_test = torch.FloatTensor(blob_label(y_test, 1, [1,2,3]))
|
|
"""
|
|
|
|
|
|
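# The actual data-loading call is not present in this file. A commented-out
# placeholder sketch, assuming read_csv returns feature and label arrays (the
# real signature of utils.nn_reader.read_csv and the file paths may differ):
# X_train, y_train = read_csv('train.csv')
# X_test, y_test = read_csv('test.csv')
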
# Convert the loaded arrays to tensors with the dtypes PyTorch expects.
X_train = torch.as_tensor(X_train, dtype=torch.float32)
X_test = torch.as_tensor(X_test, dtype=torch.float32)
y_train = torch.as_tensor(y_train, dtype=torch.long)  # class indices for CrossEntropyLoss
y_test = torch.as_tensor(y_test, dtype=torch.long)

model = Feedforward(28, 9, 3)  # 28 input features, 9 hidden units, 3 classes
criterion = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

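# Quick shape sanity check on random dummy inputs: the model maps (N, 28)
# features to (N, 3) raw logits, which CrossEntropyLoss pairs with integer
# class targets of shape (N,).
_dummy_logits = model(torch.randn(4, 28))
assert _dummy_logits.shape == (4, 3)
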
model.eval()
with torch.no_grad():
    y_pred = model(X_test)
    pred_classes = torch.argmax(y_pred, dim=1)  # hard class predictions (not needed for the loss)
    # CrossEntropyLoss takes (logits, targets) in that order.
    before_train = criterion(y_pred, y_test)
print('Test loss before training', before_train.item())

model.train()
epochs = 20
for epoch in range(epochs):
    optimizer.zero_grad()
    # Forward pass
    y_pred = model(X_train)
    # Compute loss on raw logits vs. integer class targets
    loss = criterion(y_pred.squeeze(), y_train)

    print('Epoch {}: train loss: {}'.format(epoch, loss.item()))
    # Backward pass
    loss.backward()
    optimizer.step()

model.eval()
with torch.no_grad():
    y_pred = model(X_test)
    after_train = criterion(y_pred.squeeze(), y_test)
print('Test loss after training', after_train.item())
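
# Example (illustrative sketch): turn the trained model's logits into class
# probabilities and hard predictions; `probs`, `pred_classes`, and `accuracy`
# are names introduced here for illustration.
with torch.no_grad():
    logits = model(X_test)
    probs = model.softmax(logits)               # per-class probabilities
    pred_classes = torch.argmax(probs, dim=1)   # predicted class indices
    accuracy = (pred_classes == y_test).float().mean().item()
print('Test accuracy after training', accuracy)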