Commit 1a6f7789 authored by Yipeng Hu

ref #5 config updated with gpu tested

parent 39d7413b
@@ -100,39 +100,39 @@ def residual_unet(input):
    layer = resnet_block(layer, var_list[2])
    skip_layers.append(layer)
    layer = downsample_maxpool(layer, var_list[3])
    layer = conv3d(layer, var_list[4])
    # encoder-s1
    layer = resnet_block(layer, var_list[5])
    layer = resnet_block(layer, var_list[6])
    skip_layers.append(layer)
    layer = downsample_maxpool(layer, var_list[7])
    layer = conv3d(layer, var_list[8])
    # encoder-s2
    layer = resnet_block(layer, var_list[9])
    layer = resnet_block(layer, var_list[10])
    skip_layers.append(layer)
    layer = downsample_maxpool(layer, var_list[11])
    layer = conv3d(layer, var_list[12])
    # deep-layers-s3
    layer = resnet_block(layer, var_list[13])
    layer = resnet_block(layer, var_list[14])
    layer = resnet_block(layer, var_list[15])
    # decoder-s2
    layer = deconv3d(layer, var_list[16], skip_layers[2].shape) + skip_layers[2]
    layer = resnet_block(layer, var_list[17])
    layer = resnet_block(layer, var_list[18])
    # decoder-s1
    layer = deconv3d(layer, var_list[19], skip_layers[1].shape) + skip_layers[1]
    layer = resnet_block(layer, var_list[20])
    layer = resnet_block(layer, var_list[21])
    # decoder-s0
    layer = deconv3d(layer, var_list[22], skip_layers[0].shape) + skip_layers[0]
    layer = resnet_block(layer, var_list[23])
    layer = resnet_block(layer, var_list[24])
    # output-layer
    layer = tf.sigmoid(conv3d(layer, var_list[25], activation=False))
    return layer
'''
def dense(input, weights):
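
For context, the helper ops called in residual_unet above (conv3d, resnet_block, downsample_maxpool, deconv3d) are defined earlier in this file and sit outside this hunk. Below is a minimal sketch of plausible TensorFlow 2 definitions; the signatures, kernel sizes, and strides are assumptions, not the repository's exact code:

import tensorflow as tf

def conv3d(input, filters, activation=True):
    # 3D convolution with an optional ReLU; the output layer above calls
    # this with activation=False so the result feeds straight into sigmoid
    y = tf.nn.conv3d(input, filters, strides=[1, 1, 1, 1, 1], padding='SAME')
    return tf.nn.relu(y) if activation else y

def resnet_block(input, var_pair):
    # two convolutions wrapped with an identity shortcut; assumes each
    # var_list entry passed in holds a pair of filter tensors
    y = conv3d(input, var_pair[0])
    y = tf.nn.conv3d(y, var_pair[1], strides=[1, 1, 1, 1, 1], padding='SAME')
    return tf.nn.relu(y + input)

def downsample_maxpool(input, filters):
    # convolve, then halve each spatial dimension with max pooling
    # (whether a convolution precedes the pooling is an assumption)
    y = conv3d(input, filters)
    return tf.nn.max_pool3d(y, ksize=[1, 3, 3, 3, 1], strides=[1, 2, 2, 2, 1], padding='SAME')

def deconv3d(input, filters, out_shape):
    # transposed convolution upsampling back to the skip connection's shape
    return tf.nn.conv3d_transpose(input, filters, output_shape=out_shape, strides=[1, 2, 2, 2, 1], padding='SAME')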
@@ -216,22 +216,23 @@ for step in range(total_iter):
    # find out data indices for a minibatch
    minibatch_idx = step % num_minibatch  # minibatch index
    indices_mb = indices_train[minibatch_idx*size_minibatch:(minibatch_idx+1)*size_minibatch]
    # halve image size so this can be reasonably tested, e.g. on a CPU
    input_mb = DataFeeder.load_images_train(indices_mb)[:, ::2, ::2, ::2, :]
    label_mb = DataFeeder.load_labels_train(indices_mb)[:, ::2, ::2, ::2, :]
    # update the variables
    loss_train = train_step(residual_unet, var_list, optimizer, input_mb, label_mb)
    # print training information
-    if (step % 1) == 0:
+    if (step % 100) == 0:
        tf.print('Step', step, ': training-loss=', loss_train)
    # --- simple tests during training ---
-    if (step % 100) == 0:
+    if (step % 1000) == 0:
        indices_test = [random.randrange(30) for i in range(size_minibatch)]  # select size_minibatch test data
        input_test = DataFeeder.load_images_test(indices_test)[:, ::2, ::2, ::2, :]
        pred_test = residual_unet(input_test)
        # save the segmentation
        for idx in range(size_minibatch):
            np.save("./label_test%02d_step%06d.npy" % (indices_test[idx], step), pred_test[idx, ...])
-        tf.print('Test results saved.')
\ No newline at end of file
+        tf.print('Test results saved.')
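
Similarly, the train_step called in the loop is defined outside this hunk. A minimal sketch of the usual TensorFlow 2 pattern it presumably follows; the binary cross-entropy loss here is an assumption (a Dice loss would be equally plausible for segmentation):

import tensorflow as tf

@tf.function
def train_step(model, weights, optimizer, images, labels):
    # one update: forward pass, loss, gradients w.r.t. the explicit
    # variable list, then an optimizer step
    with tf.GradientTape() as tape:
        predictions = model(images)
        # loss choice is an assumption; the file may use Dice instead
        loss = tf.reduce_mean(tf.keras.losses.binary_crossentropy(labels, predictions))
    gradients = tape.gradient(loss, weights)
    optimizer.apply_gradients(zip(gradients, weights))
    return loss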