출처 : 변성윤님 블로그.
출처 : 부스트캠프 AI Tech.
In [ ]:
from IPython.core.display import display, HTML
display(HTML("<style>.container { width:140% !important; }</style>"))
0. Libarary Import¶
In [9]:
import io
import os
import yaml
import pandas as pd
import numpy as np
from PIL import Image
import torch
import torch.nn as nn
import torch.nn.functional as F
from efficientnet_pytorch import EfficientNet
import albumentations
import albumentations.pytorch
import ipywidgets as widgets
from IPython.display import Image as display_image
from typing import Tuple
In [10]:
with open("config.yaml") as f:
config = yaml.load(f, Loader=yaml.FullLoader)
In [11]:
class MyEfficientNet(nn.Module) :
'''
EfiicientNet-b4의 출력층만 변경합니다.
한번에 18개의 Class를 예측하는 형태의 Model입니다.
'''
def __init__(self, num_classes: int = 18) :
super(MyEfficientNet, self).__init__()
self.EFF = EfficientNet.from_pretrained('efficientnet-b4', in_channels=3, num_classes=num_classes)
def forward(self, x) -> torch.Tensor:
x = self.EFF(x)
x = F.softmax(x, dim=1)
return x
In [12]:
def transform_image(image_bytes) -> torch.Tensor:
transform = albumentations.Compose([
albumentations.Resize(height=512, width=384),
albumentations.Normalize(mean=(0.5, 0.5, 0.5),
std=(0.2, 0.2, 0.2)),
albumentations.pytorch.transforms.ToTensorV2()
])
image = Image.open(io.BytesIO(image_bytes))
image = image.convert('RGB')
image_array = np.array(image)
return transform(image=image_array)['image'].unsqueeze(0)
In [13]:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model = MyEfficientNet(num_classes=18).to(device)
model.load_state_dict(torch.load(config['model_path'], map_location=device))
# model.eval()
Loaded pretrained weights for efficientnet-b4
Out[13]:
<All keys matched successfully>
In [14]:
def get_prediction(image_bytes: bytes) -> Tuple[torch.Tensor, torch.Tensor]:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
tensor = transform_image(image_bytes=image_bytes).to(device)
outputs = model.forward(tensor)
_, y_hat = outputs.max(1)
return tensor, y_hat
2. Inference¶
- TODO : 파일 업로더 생성
- TODO : 버튼 클릭시 이미지 보이기
- TODO : 인퍼런스 버튼 클릭시 인퍼런스 실행
In [15]:
uploader = widgets.FileUpload(
accept ='.png, .jpg, .jpeg',
multiple=False
)
In [16]:
display(uploader)
In [19]:
def on_display_click_callback(clicked_button: widgets.Button) -> None:
global content
uploaded_filename = next(iter(uploader.value))
content = uploader.value[uploaded_filename]['content']
display_image_space.value = content
display_button = widgets.Button(description = "Display Image")
display_image_space = widgets.Image()
display_button.on_click(on_display_click_callback)
display(display_button, display_image_space)
In [32]:
def on_inference_click_callback(clicked_button: widgets.Button) -> None:
with inference_output:
inference_output.clear_output()
_, output = get_prediction(content)
print(config['classes'][output.item()])
In [33]:
inference_button = widgets.Button(description="Inference!")
inference_output = widgets.Output(layout = {'border' : '1px solid baclk'})
inference_button.on_click(on_inference_click_callback)
display(inference_button, inference_output)
'MLOps' 카테고리의 다른 글
MLOps - 7. Streamlit (0) | 2022.05.23 |
---|---|
MLOps - 6. Streamlit (0) | 2022.05.22 |
MLOps - 4. Voila, ipywidget (0) | 2022.05.20 |
MLOps - 3. 머신러닝 프로젝트 라이프 사이클 (0) | 2022.05.19 |
MLOps - 2. Model Serving (0) | 2022.05.18 |
댓글