@@ -120,11 +120,9 @@ def modcrop(image, scale=3):
 
 def train_input_worker(args):
   image_data, config = args
-  image_size, label_size, stride, scale, distort = config
+  image_size, label_size, stride, scale, padding, distort = config
 
   single_input_sequence, single_label_sequence = [], []
-  padding = abs(image_size - label_size) // 2 # eg. for 3x: (21 - 11) / 2 = 5
-  label_padding = abs((image_size - 4) - label_size) // 2 # eg. for 3x: (21 - (11 - 4)) / 2 = 7
 
   input_, label_ = preprocess(image_data, scale, distort=distort)
 
@@ -133,10 +131,10 @@ def train_input_worker(args):
   else:
     h, w = input_.shape
 
-  for x in range(0, h - image_size - padding + 1, stride):
-    for y in range(0, w - image_size - padding + 1, stride):
-      sub_input = input_[x + padding : x + padding + image_size, y + padding : y + padding + image_size]
-      x_loc, y_loc = x + label_padding, y + label_padding
+  for x in range(0, h - image_size + 1, stride):
+    for y in range(0, w - image_size + 1, stride):
+      sub_input = input_[x : x + image_size, y : y + image_size]
+      x_loc, y_loc = x + padding, y + padding
       sub_label = label_[x_loc * scale : x_loc * scale + label_size, y_loc * scale : y_loc * scale + label_size]
 
       sub_input = sub_input.reshape([image_size, image_size, 1])
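For context on what the new indexing does: the low-resolution patch is now cropped directly at (x, y), and the (halved) padding is applied only when locating the matching high-resolution label patch. Below is a minimal sketch of that indexing with illustrative toy values (not the repo's actual config; `input_` and `label_` are stand-in random arrays), assuming label_size = (image_size - 2 * padding) * scale:

```python
import numpy as np

# Illustrative toy values only (not the repo's actual config): 3x scale,
# 11x11 LR input patches, a 2-pixel border per side, so the HR label
# patch is (11 - 2*2) * 3 = 21 pixels on a side.
scale, image_size, padding, stride = 3, 11, 2, 4
label_size = (image_size - 2 * padding) * scale

h, w = 33, 33                                   # LR image size
input_ = np.random.rand(h, w)                   # stand-in for the downscaled image
label_ = np.random.rand(h * scale, w * scale)   # stand-in for the original HR image

for x in range(0, h - image_size + 1, stride):
    for y in range(0, w - image_size + 1, stride):
        # Crop the LR patch directly at (x, y) ...
        sub_input = input_[x : x + image_size, y : y + image_size]
        # ... and shift by the per-side padding only to locate the HR label patch.
        x_loc, y_loc = x + padding, y + padding
        sub_label = label_[x_loc * scale : x_loc * scale + label_size,
                           y_loc * scale : y_loc * scale + label_size]
        assert sub_input.shape == (image_size, image_size)
        assert sub_label.shape == (label_size, label_size)
```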
@@ -165,7 +163,7 @@ def thread_train_setup(config):
   pool = Pool(config.threads)
 
   # Distribute |images_per_thread| images across each worker process
-  config_values = [config.image_size, config.label_size, config.stride, config.scale, config.distort]
+  config_values = [config.image_size, config.label_size, config.stride, config.scale, config.padding // 2, config.distort]
   images_per_thread = len(data) // config.threads
   workers = []
   for thread in range(config.threads):
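The per-worker config tuple grows from five to six values so the halved padding, derived once from `config.padding`, is handed to each worker instead of being re-computed inside it. A minimal sketch of that pack/unpack hand-off, with hypothetical config values (the real ones come from the repo's argument parsing and model setup):

```python
from types import SimpleNamespace

# Hypothetical values for illustration only.
config = SimpleNamespace(image_size=11, label_size=21, stride=4, scale=3,
                         padding=4, distort=False)

# Packed once before dispatching work to the pool ...
config_values = [config.image_size, config.label_size, config.stride,
                 config.scale, config.padding // 2, config.distort]

# ... and unpacked by each worker, matching the new six-value tuple.
image_size, label_size, stride, scale, padding, distort = config_values
```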
@@ -202,14 +200,12 @@ def train_input_setup(config):
   Read image files, make their sub-images, and save them as a h5 file format.
   """
   sess = config.sess
-  image_size, label_size, stride, scale = config.image_size, config.label_size, config.stride, config.scale
+  image_size, label_size, stride, scale, padding = config.image_size, config.label_size, config.stride, config.scale, config.padding // 2
 
   # Load data path
   data = prepare_data(sess, dataset=config.data_dir)
 
   sub_input_sequence, sub_label_sequence = [], []
-  padding = abs(image_size - label_size) // 2 # eg. for 3x: (21 - 11) / 2 = 5
-  label_padding = abs((image_size - 4) - label_size) // 2 # eg. for 3x: (21 - (11 - 4)) / 2 = 7
 
   for i in range(len(data)):
     input_, label_ = preprocess(data[i], scale, distort=config.distort)
@@ -219,10 +215,10 @@ def train_input_setup(config):
     else:
       h, w = input_.shape
 
-    for x in range(0, h - image_size - padding + 1, stride):
-      for y in range(0, w - image_size - padding + 1, stride):
-        sub_input = input_[x + padding : x + padding + image_size, y + padding : y + padding + image_size]
-        x_loc, y_loc = x + label_padding, y + label_padding
+    for x in range(0, h - image_size + 1, stride):
+      for y in range(0, w - image_size + 1, stride):
+        sub_input = input_[x : x + image_size, y : y + image_size]
+        x_loc, y_loc = x + padding, y + padding
         sub_label = label_[x_loc * scale : x_loc * scale + label_size, y_loc * scale : y_loc * scale + label_size]
 
         sub_input = sub_input.reshape([image_size, image_size, 1])
@@ -242,14 +238,12 @@ def test_input_setup(config):
   Read image files, make their sub-images, and save them as a h5 file format.
   """
   sess = config.sess
-  image_size, label_size, stride, scale = config.image_size, config.label_size, config.stride, config.scale
+  image_size, label_size, stride, scale, padding = config.image_size, config.label_size, config.stride, config.scale, config.padding // 2
 
   # Load data path
   data = prepare_data(sess, dataset="Test")
 
   sub_input_sequence, sub_label_sequence = [], []
-  padding = abs(image_size - label_size) // 2 # eg. (21 - 11) / 2 = 5
-  label_padding = abs((image_size - 4) - label_size) // 2 # eg. for 3x: (21 - (11 - 4)) / 2 = 7
 
   pic_index = 2 # Index of image based on lexicographic order in data folder
   input_, label_ = preprocess(data[pic_index], config.scale)
@@ -260,13 +254,13 @@ def test_input_setup(config):
     h, w = input_.shape
 
   nx, ny = 0, 0
-  for x in range(0, h - image_size - padding + 1, stride):
+  for x in range(0, h - image_size + 1, stride):
     nx += 1
     ny = 0
-    for y in range(0, w - image_size - padding + 1, stride):
+    for y in range(0, w - image_size + 1, stride):
       ny += 1
-      sub_input = input_[x + padding : x + padding + image_size, y + padding : y + padding + image_size]
-      x_loc, y_loc = x + label_padding, y + label_padding
+      sub_input = input_[x : x + image_size, y : y + image_size]
+      x_loc, y_loc = x + padding, y + padding
       sub_label = label_[x_loc * scale : x_loc * scale + label_size, y_loc * scale : y_loc * scale + label_size]
 
       sub_input = sub_input.reshape([image_size, image_size, 1])