-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathcolorimg.py
69 lines (57 loc) · 3.32 KB
/
colorimg.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
# Colorize a grayscale image with the Zhang et al. colorization network (OpenCV DNN).
#
# Usage:
#   python colorimg.py --image images/img1.jpg --prototxt models/colorization_deploy_v2.prototxt --model models/colorization_release_v2.caffemodel --points models/pts_in_hull.npy
#   python colorimg.py --image images/img2.jpg --prototxt models/colorization_deploy_v2.prototxt --model models/colorization_release_v2.caffemodel --points models/pts_in_hull.npy
#   python colorimg.py --image images/img3.jpg --prototxt models/colorization_deploy_v2.prototxt --model models/colorization_release_v2.caffemodel --points models/pts_in_hull.npy
#   python colorimg.py --image images/img4.jpg --prototxt models/colorization_deploy_v2.prototxt --model models/colorization_release_v2.caffemodel --points models/pts_in_hull.npy

# importing the necessary packages
import numpy as np
import argparse
import cv2

# constructing argument parser and parsing the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", type=str, required=True,
help="path to input black and white image")
ap.add_argument("-p", "--prototxt", type=str, required=True,
help="path to Caffe prototxt file")
ap.add_argument("-m", "--model", type=str, required=True,
help="path to Caffe pre-trained model")
ap.add_argument("-c", "--points", type=str, required=True,
help="path to cluster center points")
args = vars(ap.parse_args())

# loading serialized black and white colorizer model and cluster center points from disk
print("[INFO] loading model...")
net = cv2.dnn.readNetFromCaffe(args["prototxt"], args["model"])
pts = np.load(args["points"])

# adding the 313 'ab' cluster centers as 1x1 convolution kernels to the model;
# conv8 gets a constant rebalancing blob of 2.606 (per the reference implementation)
class8 = net.getLayerId("class8_ab")
conv8 = net.getLayerId("conv8_313_rh")
pts = pts.transpose().reshape(2, 313, 1, 1)
net.getLayer(class8).blobs = [pts.astype("float32")]
net.getLayer(conv8).blobs = [np.full([1, 313], 2.606, dtype="float32")]

# loading image, scaling px intensity range to [0, 1], converting from BGR to Lab
image = cv2.imread(args["image"])
if image is None:
    # fail fast with a clear message instead of an AttributeError below
    raise SystemExit("[ERROR] could not read image: {}".format(args["image"]))
scaled = image.astype("float32") / 255.0
lab = cv2.cvtColor(scaled, cv2.COLOR_BGR2LAB)

# resize the lab image to 224x224 (the dimensions the colorization network accepts),
# split channels, extract the 'L' channel, and then perform mean centering
resized = cv2.resize(lab, (224, 224))
L = cv2.split(resized)[0]
L -= 50

# passing the L channel through the network to predict the 'a' and 'b' channel values
# (BUG FIX: this print() was previously wrapped in quotes, making it a no-op string literal)
print("[INFO] colorizing image...")
net.setInput(cv2.dnn.blobFromImage(L))
ab = net.forward()[0, :, :, :].transpose((1, 2, 0))

# resizing predicted 'ab' values to fit same dimensions as the input image
ab = cv2.resize(ab, (image.shape[1], image.shape[0]))

# grab the unresized 'L' channel and concat it with the predicted 'ab' channels
L = cv2.split(lab)[0]
colorized = np.concatenate((L[:, :, np.newaxis], ab), axis=2)

# convert the image from Lab to BGR and clip any values outside [0, 1]
colorized = cv2.cvtColor(colorized, cv2.COLOR_LAB2BGR)
colorized = np.clip(colorized, 0, 1)

# the colorized image is converted from a float in range [0, 1] to unsigned 8-bit int in range [0, 255]
colorized = (255 * colorized).astype("uint8")

# show the original and output colorized images, and save the result
# NOTE(review): imwrite silently returns False if the 'results/' directory does not exist — confirm it is created beforehand
cv2.imshow("Black and White", image)
cv2.imshow("Colorized", colorized)
cv2.imwrite("results/colorized.jpg", colorized)
cv2.waitKey(0)