2. 손글씨 인식 모델 만들기¶
In [1]:
import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
In [2]:
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print(device)
cpu
In [3]:
digits = load_digits()
X_data = digits['data']
y_data = digits['target']
print(X_data.shape)
print(y_data.shape)
(1797, 64) (1797,)
In [4]:
fig, axes = plt.subplots(nrows=2, ncols=5, figsize=(14, 8))
for i, ax in enumerate(axes.flatten()):
ax.imshow(X_data[i].reshape(8, 8), cmap='gray')
ax.set_title(y_data[i])
ax.axis('off')
In [5]:
X_data = torch.FloatTensor(X_data)
y_data = torch.LongTensor(y_data)
print(X_data.shape)
print(y_data.shape)
torch.Size([1797, 64]) torch.Size([1797])
In [6]:
x_train, x_test, y_train, y_test = train_test_split(X_data, y_data, test_size=0.2, random_state=2024)
print(x_train.shape, y_train.shape)
print(x_test.shape, y_test.shape)
torch.Size([1437, 64]) torch.Size([1437]) torch.Size([360, 64]) torch.Size([360])
In [14]:
loader = torch.utils.data.DataLoader(
dataset=list(zip(x_train, y_train)),
batch_size=64,
shuffle=True,
drop_last=False
)
imgs, labels = next(iter(loader))
fig, axes = plt.subplots(nrows=8, ncols=8, figsize=(14, 14))
fig, axes = plt.subplots(nrows=8, ncols=8, figsize=(14,14))
for ax, img, label in zip(axes.flatten(), imgs, labels):
ax.imshow(img.reshape((8,8)), cmap='gray')
ax.set_title(str(label))
ax.axis('off')
In [8]:
model = nn.Sequential(
nn.Linear(64, 10),
)
optimizer = optim.Adam(model.parameters(), lr=0.01)
epochs = 50
for epoch in range(epochs + 1):
sum_losses = 0
sum_accs = 0
for x_batch, y_batch in loader:
y_pred = model(x_batch)
loss = nn.CrossEntropyLoss()(y_pred, y_batch)
optimizer.zero_grad()
loss.backward()
optimizer.step()
sum_losses = sum_losses + loss
y_prob = nn.Softmax(1)(y_pred)
y_pred_index = torch.argmax(y_prob, axis=1)
acc = (y_batch == y_pred_index).sum() / len(y_batch) * 100
sum_accs = sum_accs + acc
avg_loss = sum_losses / len(loader)
avg_acc = sum_accs / len(loader)
print(f'Epoch ; {epoch:4d}/{epochs} Loss:{avg_loss:.4f} Accuracy: {avg_acc:.2f}%')
Epoch ; 0/50 Loss:1.6272 Accuracy: 59.36% Epoch ; 1/50 Loss:0.3238 Accuracy: 89.33% Epoch ; 2/50 Loss:0.1836 Accuracy: 93.86% Epoch ; 3/50 Loss:0.1330 Accuracy: 95.18% Epoch ; 4/50 Loss:0.1141 Accuracy: 96.18% Epoch ; 5/50 Loss:0.1001 Accuracy: 96.74% Epoch ; 6/50 Loss:0.0825 Accuracy: 97.40% Epoch ; 7/50 Loss:0.0707 Accuracy: 97.76% Epoch ; 8/50 Loss:0.0641 Accuracy: 98.41% Epoch ; 9/50 Loss:0.0740 Accuracy: 97.83% Epoch ; 10/50 Loss:0.0647 Accuracy: 98.44% Epoch ; 11/50 Loss:0.0696 Accuracy: 97.83% Epoch ; 12/50 Loss:0.0537 Accuracy: 98.57% Epoch ; 13/50 Loss:0.0384 Accuracy: 98.83% Epoch ; 14/50 Loss:0.0346 Accuracy: 99.52% Epoch ; 15/50 Loss:0.0344 Accuracy: 99.32% Epoch ; 16/50 Loss:0.0308 Accuracy: 99.39% Epoch ; 17/50 Loss:0.0347 Accuracy: 98.95% Epoch ; 18/50 Loss:0.0247 Accuracy: 99.86% Epoch ; 19/50 Loss:0.0242 Accuracy: 99.52% Epoch ; 20/50 Loss:0.0219 Accuracy: 99.86% Epoch ; 21/50 Loss:0.0215 Accuracy: 99.73% Epoch ; 22/50 Loss:0.0243 Accuracy: 99.73% Epoch ; 23/50 Loss:0.0198 Accuracy: 99.73% Epoch ; 24/50 Loss:0.0228 Accuracy: 99.37% Epoch ; 25/50 Loss:0.0254 Accuracy: 99.37% Epoch ; 26/50 Loss:0.0232 Accuracy: 99.73% Epoch ; 27/50 Loss:0.0195 Accuracy: 99.73% Epoch ; 28/50 Loss:0.0146 Accuracy: 99.93% Epoch ; 29/50 Loss:0.0153 Accuracy: 99.86% Epoch ; 30/50 Loss:0.0213 Accuracy: 99.52% Epoch ; 31/50 Loss:0.0199 Accuracy: 99.52% Epoch ; 32/50 Loss:0.0164 Accuracy: 99.80% Epoch ; 33/50 Loss:0.0136 Accuracy: 99.86% Epoch ; 34/50 Loss:0.0128 Accuracy: 99.86% Epoch ; 35/50 Loss:0.0152 Accuracy: 99.80% Epoch ; 36/50 Loss:0.0164 Accuracy: 99.86% Epoch ; 37/50 Loss:0.0113 Accuracy: 100.00% Epoch ; 38/50 Loss:0.0094 Accuracy: 100.00% Epoch ; 39/50 Loss:0.0091 Accuracy: 100.00% Epoch ; 40/50 Loss:0.0092 Accuracy: 100.00% Epoch ; 41/50 Loss:0.0084 Accuracy: 100.00% Epoch ; 42/50 Loss:0.0074 Accuracy: 100.00% Epoch ; 43/50 Loss:0.0082 Accuracy: 100.00% Epoch ; 44/50 Loss:0.0075 Accuracy: 100.00% Epoch ; 45/50 Loss:0.0078 Accuracy: 99.93% Epoch ; 46/50 Loss:0.0109 Accuracy: 99.93% Epoch ; 47/50 Loss:0.0098 Accuracy: 100.00% Epoch ; 48/50 Loss:0.0074 Accuracy: 100.00% Epoch ; 49/50 Loss:0.0066 Accuracy: 100.00% Epoch ; 50/50 Loss:0.0058 Accuracy: 100.00%
In [11]:
plt.imshow(x_test[10].reshape(8, 8), cmap='gray')
print(y_test[10])
tensor(7)
In [12]:
y_pred = model(x_test)
y_pred[10]
Out[12]:
tensor([ -9.9989, -2.7055, -10.6654, -0.9536, -0.9967, -4.4301, -13.5802, 13.7799, 0.1467, 4.4320], grad_fn=<SelectBackward0>)
In [13]:
y_prob = nn.Softmax(1)(y_pred)
y_prob[10]
Out[13]:
tensor([4.7091e-11, 6.9253e-08, 2.4182e-11, 3.9926e-07, 3.8244e-07, 1.2344e-08, 1.3110e-12, 9.9991e-01, 1.1998e-06, 8.7136e-05], grad_fn=<SelectBackward0>)
In [15]:
for i in range(10):
print(f'숫자 {i}일 확률: {y_prob[10][i]:.2f}')
숫자 0일 확률: 0.00 숫자 1일 확률: 0.00 숫자 2일 확률: 0.00 숫자 3일 확률: 0.00 숫자 4일 확률: 0.00 숫자 5일 확률: 0.00 숫자 6일 확률: 0.00 숫자 7일 확률: 1.00 숫자 8일 확률: 0.00 숫자 9일 확률: 0.00
In [16]:
y_pred_index = torch.argmax(y_prob, axis=1)
accuracy = (y_test == y_pred_index).float().sum() / len(y_test) * 100
print(f'테스트 정확도: {accuracy:.2f}%')
테스트 정확도: 95.56%
In [ ]:
'코딩 > 머신러닝과 딥러닝' 카테고리의 다른 글
비선형 활성화 함수 (1) | 2024.07.17 |
---|---|
딥러닝 (0) | 2024.07.17 |
파이토치로 구현한 논리회귀 (0) | 2024.07.17 |
파이토치로 구현한 선형회귀 (0) | 2024.07.17 |
파이토치 (0) | 2024.07.17 |