-
Notifications
You must be signed in to change notification settings - Fork 0
/
2_hdr_scalin.py
347 lines (273 loc) · 13.4 KB
/
2_hdr_scalin.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
# -*- coding: utf-8 -*-
"""HDRI_save_crf_rad_map.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1YasZa2obtm_BYde8JAaTtB6bbdHhjcQJ
"""
import numpy as np
from numpy import asarray
import cv2
import random
import glob
import os
from natsort import natsorted
from PIL import Image
from PIL import ImageFile
# Allow PIL to open images whose files were truncated during capture/transfer
# instead of raising an IOError.
ImageFile.LOAD_TRUNCATED_IMAGES = True
import pandas as pd
# Colab-only: mount Google Drive so the data folders under `path` are reachable.
from google.colab import drive
drive.mount('/content/drive')
def linearWeight(pixel_value):
    """Hat-shaped (triangle) weighting function over pixel intensity.

    Pixels near the middle of the intensity range are the most reliable;
    values close to 0 (underexposed) or 255 (saturated) get low weight.

    Parameters
    ----------
    pixel_value : np.uint8
        A pixel intensity value from 0 to 255.

    Returns
    -------
    weight : np.float64
        The weight corresponding to the input pixel intensity.
    """
    z_min, z_max = 0., 255.
    midpoint = (z_min + z_max) / 2
    if pixel_value > midpoint:
        return z_max - pixel_value
    return pixel_value - z_min
def sampleIntensities(images):
    """Sample one pixel location per intensity level from the exposure stack.

    Parameters
    ----------
    images : list<numpy.ndarray>
        A list containing a stack of single-channel (i.e., grayscale)
        layers of an HDR exposure stack.

    Returns
    -------
    numpy.ndarray, dtype=np.uint8
        Sampled intensity values, shape (256, num_images). Row i holds the
        intensity of the same pixel location in every exposure, where that
        location was chosen (at random) among pixels whose value is i in
        the reference image.
    """
    z_min, z_max = 0, 255
    num_images = len(images)
    num_levels = z_max - z_min + 1
    samples = np.zeros((num_levels, num_images), dtype=np.uint8)
    # The middle exposure is the least likely to be under- or over-saturated,
    # so it serves as the reference for picking pixel locations.
    reference = images[num_images // 2]
    for level in range(z_min, z_max + 1):
        ys, xs = np.where(reference == level)
        if len(ys) == 0:
            continue  # this intensity never occurs in the reference image
        pick = random.randrange(len(ys))
        for img_idx, img in enumerate(images):
            samples[level, img_idx] = img[ys[pick], xs[pick]]
    return samples
def computeResponseCurve(intensity_samples, log_exposures, smoothing_lambda, weighting_function):
    """Recover the camera response curve g(z) for a single color channel.

    Sets up and solves the Debevec & Malik least-squares system: one
    weighted data-fitting equation per (sample, image) pair, a weighted
    second-difference smoothness penalty on g, and a single constraint
    anchoring g at middle gray.

    Parameters
    ----------
    intensity_samples : numpy.ndarray
        Stack of single channel input values (num_samples x num_images).
    log_exposures : numpy.ndarray
        Log exposure times (size == num_images).
    smoothing_lambda : float
        A constant value used to correct for scale differences between
        data and smoothing terms in the constraint matrix -- source
        paper suggests a value of 100.
    weighting_function : callable
        Function that computes a weight from a pixel intensity.

    Returns
    -------
    numpy.ndarray, dtype=np.float64
        A vector g(z) where the element at index i is the log exposure
        of a pixel with intensity value z = i (e.g., g[0] is the log
        exposure of z=0, g[1] is the log exposure of z=1, etc.).
    """
    z_min, z_max = 0, 255
    intensity_range = 255  # span of possible uint8 pixel values
    num_samples = intensity_samples.shape[0]
    num_images = len(log_exposures)

    # N*P data rows + (z_max-1)-(z_min+1)+1 smoothness rows + 1 anchor row;
    # columns: 256 unknowns for g plus one log-radiance unknown per sample.
    total_rows = num_images * num_samples + intensity_range
    total_cols = num_samples + intensity_range + 1
    mat_A = np.zeros((total_rows, total_cols), dtype=np.float64)
    mat_b = np.zeros((total_rows, 1), dtype=np.float64)

    row = 0
    # 1. Data-fitting constraints: w * (g(Z_ij) - ln E_i) = w * ln t_j
    for sample_idx in range(num_samples):
        for img_idx in range(num_images):
            z = intensity_samples[sample_idx, img_idx]
            w = weighting_function(z)
            mat_A[row, z] = w
            mat_A[row, (intensity_range + 1) + sample_idx] = -w
            mat_b[row, 0] = w * log_exposures[img_idx]
            row += 1
    # 2. Smoothness constraints: weighted second difference of g must be small.
    for z in range(z_min + 1, z_max):
        w = weighting_function(z)
        mat_A[row, z - 1] = w * smoothing_lambda
        mat_A[row, z] = -2 * w * smoothing_lambda
        mat_A[row, z + 1] = w * smoothing_lambda
        row += 1
    # 3. Anchor the curve at middle gray so the system has a unique solution.
    mat_A[row, (z_max - z_min) // 2] = 1

    solution = np.dot(np.linalg.pinv(mat_A), mat_b)
    return solution[0: intensity_range + 1][:, 0]
def computeRadianceMap(images, log_exposure_times, response_curve, weighting_function):
    """Build a per-pixel log-radiance map from the fitted response curve.

    Parameters
    ----------
    images : list
        Collection containing a single color layer (i.e., grayscale)
        from each image in the exposure stack. (size == num_images)
    log_exposure_times : numpy.ndarray
        Array containing the log exposure times for each image in the
        exposure stack (size == num_images).
    response_curve : numpy.ndarray
        Least-squares fitted log exposure of each pixel value z.
    weighting_function : callable
        Function that computes the weights.

    Returns
    -------
    numpy.ndarray(dtype=np.float64)
        The image radiance map (in log space). Each pixel is the weighted
        average of g(Z) - ln(t) across exposures; when every weight is
        zero the middle exposure is used as a fallback.
    """
    shape = images[0].shape
    rad_map = np.zeros(shape, dtype=np.float64)
    num_images = len(images)
    mid = num_images // 2
    for r in range(shape[0]):
        for c in range(shape[1]):
            pixel_values = [images[k][r, c] for k in range(num_images)]
            g = np.array([response_curve[z] for z in pixel_values])
            w = np.array([weighting_function(z) for z in pixel_values])
            SumW = np.sum(w)
            if SumW > 0:
                rad_map[r, c] = np.sum(w * (g - log_exposure_times) / SumW)
            else:
                # All exposures are saturated/underexposed here; fall back
                # to the (best-behaved) middle exposure.
                rad_map[r, c] = g[mid] - log_exposure_times[mid]
    return rad_map
def globalToneMapping(image, gamma):
    """Global tone mapping via gamma correction.

    Parameters
    ----------
    image : numpy.ndarray
        Image to be corrected; expected to hold values in [0, 255].
    gamma : float
        Gamma correction factor. Higher value gives a brighter result;
        lower gives a darker one.

    Returns
    -------
    numpy.ndarray
        The gamma-corrected image, scaled to [0, 1].
    """
    normalized = image / 255.0
    return cv2.pow(normalized, 1.0 / gamma)
def intensityAdjustment(image, template):
    """Tune image intensity per channel to match a template image.

    Each channel is rescaled so its mean equals the mean of the same
    channel in `template`. Typically the template is the middle image of
    the exposure stack.

    Parameters
    ----------
    image : numpy.ndarray
        (m, n, channels) image to be adjusted.
    template : numpy.ndarray
        Reference image with the same number of channels.

    Returns
    -------
    numpy.ndarray
        float64 image whose per-channel means match the template's.
    """
    m, n, channel = image.shape
    output = np.zeros((m, n, channel))
    for ch in range(channel):
        image_avg, template_avg = np.average(image[:, :, ch]), np.average(template[:, :, ch])
        if image_avg == 0:
            # An all-zero channel cannot be rescaled; copy it through instead
            # of dividing by zero (which would produce inf/NaN).
            output[..., ch] = image[..., ch]
        else:
            output[..., ch] = image[..., ch] * (template_avg / image_avg)
    return output
def computeHDR(images, log_exposure_times, smoothing_lambda=100., gamma=1):
    """Full HDR pipeline: response curve, radiance map, tone map, tune.

    Parameters
    ----------
    images : list<numpy.ndarray>
        A list containing an exposure stack of images.
    log_exposure_times : numpy.ndarray
        The log exposure times for each image in the exposure stack.
    smoothing_lambda : float (Optional)
        A constant value to correct for scale differences between data
        and smoothing terms in the constraint matrix -- source paper
        suggests a value of 100.
    gamma : float (Optional)
        Gamma used by the global tone mapping step.

    Returns
    -------
    tuple
        (output, tuned, response_curve_list, img_rad_map_list) where
        output and tuned are uint8 images and the lists hold the
        per-channel response curves and radiance maps.
    """
    num_channels = images[0].shape[2]
    hdr_image = np.zeros(images[0].shape, dtype=np.float64)
    response_curve_list = []
    img_rad_map_list = []
    for ch in range(num_channels):
        # Process one color plane of every exposure at a time.
        layer_stack = [img[:, :, ch] for img in images]
        samples = sampleIntensities(layer_stack)
        curve = computeResponseCurve(samples, log_exposure_times, smoothing_lambda, linearWeight)
        response_curve_list.append(curve)
        rad_map = computeRadianceMap(layer_stack, log_exposure_times, curve, linearWeight)
        img_rad_map_list.append(rad_map)
        # Rescale the log-radiance layer to the displayable (0, 255) range.
        hdr_image[..., ch] = cv2.normalize(rad_map, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
    # Global tone mapping followed by intensity matching against the
    # middle exposure of the stack.
    image_mapped = globalToneMapping(hdr_image, gamma)
    template = images[len(images) // 2]
    image_tuned = intensityAdjustment(image_mapped, template)
    output = cv2.normalize(image_tuned, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
    return output.astype(np.uint8), image_tuned.astype(np.uint8), response_curve_list, img_rad_map_list
def get_merg_from_img_array(img_list_arr, exposure_list, output_tuned, ts=None, base_path=None):
    """Merge a stacked image array into an HDR image and save the results.

    Saves the merged PNG under <base_path>/img and the merged array,
    per-channel response curves, and per-channel radiance maps under
    <base_path>/npy, all with a timestamped name.

    Parameters
    ----------
    img_list_arr : numpy.ndarray
        Array containing a stack of image arrays (merge input).
    exposure_list : list
        Exposure times for the images in img_list_arr; order must
        correspond to the arrays in img_list_arr.
    output_tuned : bool
        When True, also save the intensity-tuned image and array.
    ts : optional
        Timestamp used in the output filenames. Defaults to the
        module-level global ``ts`` (set by the driver loop) for backward
        compatibility with the original implementation.
    base_path : str, optional
        Output root containing the ``img`` and ``npy`` folders; defaults
        to the module-level ``path``.

    Returns
    -------
    None
    """
    if ts is None:
        # Preserve the original behavior of reading the driver loop's global.
        ts = globals()["ts"]
    if base_path is None:
        base_path = path
    # Merging: returns the output image, the tuned image, and per-channel data.
    output, tuned, rc_list, irm_list = computeHDR(img_list_arr, exposure_list, smoothing_lambda=100., gamma=1)
    img_dir = os.path.join(base_path, "img")
    npy_dir = os.path.join(base_path, "npy")
    cv2.imwrite(os.path.join(img_dir, str(ts) + '_output_test.png'), output)
    np.save(os.path.join(npy_dir, str(ts) + '_output_npy.npy'), output)
    np.save(os.path.join(npy_dir, str(ts) + '_response_curve.npy'), np.array(rc_list))
    np.save(os.path.join(npy_dir, str(ts) + '_image_rad_map.npy'), np.array(irm_list))
    if output_tuned:
        # Honors the documented flag (this saving was previously dead code).
        cv2.imwrite(os.path.join(img_dir, str(ts) + '_tuned.png'), tuned)
        np.save(os.path.join(npy_dir, str(ts) + '_tuned.npy'), tuned)
"""
## PARAMS"""
# INSTRUCTION : Create folder name img and npy at the location of 'path'
path = "/content/drive/MyDrive/Upwork/Paul_M/Task_6" # Base path where notebook will be run
data_path = os.path.join(path, "HDRI_Scaling") # Path to the array folder
folder_name = "test_series" # Name of the folder with all the images
file_format = ".bmp" # extenstion of the image files
chunk_size = 9 # No of images in one chunk
#Extracting all the paths and exposure list in a list
img_path_list = [[int(f.split("/")[-1].split("_")[0]), int(f.split("/")[-1].split("_")[1].split(".")[0]), os.path.join(data_path,folder_name,f)] for f in os.listdir(os.path.join(data_path,folder_name)) if file_format in f]
s_df = pd.DataFrame(img_path_list, columns = ["time_stamp", "exposure_time","img_path"]) # Saving files into data frame
s_df.sort_values(by = ['time_stamp','exposure_time'], inplace = True) # Sorting files by unix timestamp and exposure time
s_df.head()
# Commented out IPython magic to ensure Python compatibility.
# %%time
# excluded_ts_list = [] # This list contains all timestamps that have fewer images than the chunk size
# for ts in s_df.time_stamp.unique().tolist()[:1]: # IMPORTANT NOTE : use unique_sorted_ts[:some_small_int] to see this working because it takes a lot of time to run for the entire list
# chunk_df = s_df[s_df["time_stamp"] == ts]
# if chunk_df.shape[0] >= chunk_size: # Validating for the timestamps that contains minimum no of frames to be merged
# c_df = chunk_df.iloc[0:chunk_size]
# img_list_arr = np.array([np.asarray(Image.open(p)) for p in c_df.img_path.tolist()]) # Reading all images into a list and converting it to numpy array
# exposure_list = c_df.exposure_time.tolist()
#
# # Merging the images
# get_merg_from_img_array(img_list_arr, # Array containg stack of image arrays (merge input)
# exposure_list,# list of exposure time for all the corresponding images
# True) # Flag to save/not save the tuned image
# else:
# excluded_ts_list.append(ts) # Saving the excluded timestamp that has an insufficient number of frames
# print("Time stamp contains less image frames than chunk size :"+str(ts))