# test.py
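# Runs inference with a human/part-parsing model (ACE2P or DeepLab variants) over a
# folder of images and saves one palette-indexed segmentation mask per input.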
import cv2  # used only by the optional ground-truth evaluation block below
import numpy as np
import torch
from torch.utils import data
from torchvision import transforms
from tqdm import tqdm
from PIL import Image
import matplotlib
# matplotlib.use('Qt4Agg')
import matplotlib.pyplot as plt

import network
from datasets import PascalPartValSegmentation
from utils import utils
from utils.ext_transforms import get_affine_transform  # optional evaluation block only
# Flat RGB palette for the 7 classes (class i maps to palette[3*i : 3*i + 3]).
palette = [0, 0, 0,        # 0: background
           128, 0, 0,      # 1
           0, 128, 0,      # 2
           128, 128, 0,    # 3
           0, 0, 128,      # 4
           128, 0, 128,    # 5
           0, 128, 128]    # 6
num_classes = 7
output_stride = 16
device = 'cuda' if torch.cuda.is_available() else 'cpu'  # fall back to CPU if no GPU
# model_name = 'deeplabv3plus_resnet101v2'
model_name = 'ACE2P_resnet101'
# checkpoint_path = 'checkpoints/best_deeplabv3plusedgev2_resnet101v2_pascalpart_os16_sgd_rmi_el_6960_mixwh_ms.pth'
checkpoint_path = r"ace2p_initial_abn.pth"
# img_path = 'samples/23_image.png'
# lbl_path = 'samples/1_target.png'
model_map = network.model_map
model = model_map[model_name](num_classes=num_classes, output_stride=output_stride,
                              pretrained_backbone=True, use_abn=False)
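# (use_abn=False presumably selects standard BatchNorm over in-place ABN; the
# checkpoint name 'ace2p_initial_abn.pth' suggests the source model used ABN.)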
chk = torch.load(checkpoint_path, map_location='cpu')  # load on CPU, then move to device
model.load_state_dict(chk['model_state'])
# model.load_state_dict(chk)  # for checkpoints that store the state dict directly
model.to(device)
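# eval() disables dropout and freezes BatchNorm running statistics for inference.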
model.eval()
val_transform = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],   # ImageNet statistics
                         std=[0.229, 0.224, 0.225]),
    # transforms.Normalize(mean=[0.406, 0.456, 0.485], std=[0.225, 0.224, 0.229]),  # BGR-ordered variant
])
denorm = utils.Denormalize(mean=[0.485, 0.456, 0.406],
                           std=[0.229, 0.224, 0.225])
val_dst = PascalPartValSegmentation(
    root=r"C:\Users\MohammadReza\Desktop\Thesis\Codes\Self-Correction-Human-Parsing-master\inputs",
    ext='.jpg', crop_size=[512, 512], ignore_label=255, transform=val_transform)
val_loader = data.DataLoader(val_dst, batch_size=1, shuffle=False, num_workers=0)
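
# Inference loop: preprocess each image, run the model, project the logits back to
# the original image frame, then save and display a colorized parsing mask.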
for image, meta in tqdm(val_loader):
    with torch.no_grad():
        # Reverse channel order (RGB -> BGR) to match the network's expected input.
        images = image[:, [2, 1, 0]].to(device, dtype=torch.float32)
        metas = meta
        outputs = model(images)
        if 'ACE2P' in model_name:
            # ACE2P returns nested outputs; pick out the parsing logits.
            preds = outputs['preds'][0][1].detach().cpu().numpy()[0]
        else:
            preds = outputs['preds'].detach().cpu().numpy()[0]
        # Undo the affine crop: project the 512x512 logits back onto the
        # original image frame before taking the per-pixel argmax.
        preds = val_dst.transform_logits(preds.transpose((1, 2, 0)),
                                         metas['center'].numpy()[0],
                                         metas['scale'].numpy()[0],
                                         metas['width'].numpy()[0],
                                         metas['height'].numpy()[0],
                                         input_size=[512, 512])
        preds = preds.argmax(2)
        # Denormalized copy of the network input (handy for debugging; unused below).
        img = (denorm(images[0].detach().cpu().numpy()) * 255).transpose(1, 2, 0).astype(np.uint8)
        im = Image.fromarray(preds.astype('uint8'))
        im.putpalette(palette)
        # --- Optional: score the prediction against a ground-truth color mask ---
        # targets = labels.cpu().numpy()
        # gt = Image.open(
        #     r"C:\Users\MohammadReza\Desktop\Thesis\Self-Correction-Human-Parsing-Results\gt\IMG_20201114_124215_987.png")
        # gt_ = np.array(gt)
        # # Map palette colors back to class indices.
        # gt_[(gt_ == (128, 0, 0)).all(2)] = 1
        # gt_[(gt_ == (0, 128, 0)).all(2)] = 2
        # gt_[(gt_ == (128, 128, 0)).all(2)] = 3
        # gt_[(gt_ == (0, 0, 128)).all(2)] = 4
        # gt_[(gt_ == (128, 0, 128)).all(2)] = 5
        # gt_[(gt_ == (0, 128, 128)).all(2)] = 6
        # h, w, _ = gt_.shape
        # person_center, s = PascalPartValSegmentation._box2cs(val_dst, [0, 0, w - 1, h - 1])
        # r = 0
        #
        # # Warp the ground truth into the same crop frame as the network input.
        # trans = get_affine_transform(person_center, s, r, val_dst.crop_size)
        # gt_ = cv2.warpAffine(
        #     gt_,
        #     trans,
        #     (int(val_dst.crop_size[1]), int(val_dst.crop_size[0])),
        #     flags=cv2.INTER_LINEAR,
        #     borderMode=cv2.BORDER_CONSTANT,
        #     borderValue=(0, 0, 0))
        # from metrics import StreamSegMetrics
        # met = StreamSegMetrics(7)
        # met.update(np.expand_dims(gt_[..., 0], 0), np.expand_dims(preds, 0))
        # print(met.get_results())
        # Save the mask beside the input, swapping the file extension to .png.
        im.save(metas['name'][0][:-3] + 'png')
        plt.imshow(im)
        plt.show()
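        # Optional visual check, a minimal sketch beyond the original flow. It assumes
        # metas['name'][0] is a readable path to the source image (cv2 reads BGR):
        # base = cv2.cvtColor(cv2.imread(metas['name'][0]), cv2.COLOR_BGR2RGB)
        # mask_rgb = np.array(im.convert('RGB'))  # palette mask -> RGB, original size
        # overlay = cv2.addWeighted(base, 0.6, mask_rgb, 0.4, 0.0)
        # plt.imshow(overlay)
        # plt.show()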