Commit d9bff132 authored by Yipeng Hu

ref #5 dice loss bug fixed

parent 12f23d37
@@ -175,7 +175,7 @@ def loss_crossentropy(pred, target):
 def loss_dice(pred, target, eps=1e-6):
     dice_numerator = 2 * tf.reduce_sum(pred*target, axis=[1,2,3,4])
-    dice_denominator = eps + tf.reduce_sum(target, axis=[1,2,3,4]) + tf.reduce_sum(target, axis=[1,2,3,4])
+    dice_denominator = eps + tf.reduce_sum(pred, axis=[1,2,3,4]) + tf.reduce_sum(target, axis=[1,2,3,4])
     return 1 - tf.reduce_mean(dice_numerator/dice_denominator)
 def train_step(model, input, labels):
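
A minimal sanity check for the corrected soft Dice loss, not part of the commit: it assumes TensorFlow 2.x in eager mode and the 5-D (batch, x, y, z, channel) tensors this script operates on.

import tensorflow as tf

def loss_dice(pred, target, eps=1e-6):
    # corrected soft Dice: the denominator now sums pred and target, rather than target twice
    dice_numerator = 2 * tf.reduce_sum(pred*target, axis=[1,2,3,4])
    dice_denominator = eps + tf.reduce_sum(pred, axis=[1,2,3,4]) + tf.reduce_sum(target, axis=[1,2,3,4])
    return 1 - tf.reduce_mean(dice_numerator/dice_denominator)

# toy binary 5-D volumes: batch of 2, 4x4x4 voxels, 1 channel
target = tf.cast(tf.random.uniform([2, 4, 4, 4, 1]) > 0.5, tf.float32)
print(float(loss_dice(target, target)))                # ~0 for a perfect prediction
print(float(loss_dice(tf.ones_like(target), target)))  # >0: an all-foreground prediction is penalised

With the previous denominator (target summed twice), the second case would also come out near zero because the prediction never entered the denominator; that is the bug this commit fixes.
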
@@ -221,36 +221,26 @@ for step in range(total_iter):
     # find out data indices for a minibatch
     minibatch_idx = step % num_minibatch  # minibatch index
     indices_mb = indices_train[minibatch_idx*size_minibatch:(minibatch_idx+1)*size_minibatch]
+    # halve image size so this can be reasonably tested, e.g. on a CPU
+    input_mb = DataFeeder.load_images_train(indices_mb)[:, ::2, ::2, ::2, :]
+    label_mb = DataFeeder.load_labels_train(indices_mb)[:, ::2, ::2, ::2, :]
     # update the variables
-    input_mb = DataFeeder.load_images_train(indices_mb)
-    label_mb = DataFeeder.load_labels_train(indices_mb)
     with tf.GradientTape() as tape:
         # tape.watched(var_list): trainable variables are automatically "watched".
-        current_loss = loss_dice(residual_unet(input_mb), label_mb)
-        gradients = tape.gradient(current_loss, var_list)
+        loss_train = loss_dice(residual_unet(input_mb), label_mb)
+        gradients = tape.gradient(loss_train, var_list)
     optimizer.apply_gradients(zip(gradients, var_list))
-    print(tf.reduce_mean(current_loss))
-    '''
-    # train_step(residual_unet, DataFeeder.load_images_train(indices_mb) , DataFeeder.load_labels_train(indices_mb) )
     # print training information
-    if (step % 10) == 0:
-        loss_train = sess.run(loss, feed_dict=trainFeed)
-        print('Step %d: Loss=%f' % (step, loss_train))
-    if (step % 100) == 0:
-        dice_train = sess.run(dice, feed_dict=trainFeed)
-        print('Individual training-Dice:')
-        print(dice_train)
+    if (step % 1) == 0:
+        print('Step %d: training-loss=%f' % (step, loss_train))
     # --- simple tests during training ---
-    if (step % 500) == 0:
+    if (step % 50) == 0:
         indices_test = [random.randrange(30) for i in range(size_minibatch)]  # select size_minibatch test data
-        testFeed = {ph_image: DataFeeder.load_images_test(indices_test)}
-        layer1d_test = sess.run(layer1d, feed_dict=testFeed)
+        input_test = DataFeeder.load_images_test(indices_test)[:, ::2, ::2, ::2, :]
+        pred_test = residual_unet(input_test)
         # save the segmentation
         for idx in range(size_minibatch):
-            np.save("./label_test%02d_step%06d.npy" % (indices_test[idx], step), layer1d_test[idx, ...])
+            np.save("./label_test%02d_step%06d.npy" % (indices_test[idx], step), pred_test[idx, ...])
         print('Test results saved.')
-    '''
\ No newline at end of file
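
For reference, the [:, ::2, ::2, ::2, :] slicing introduced above keeps every second voxel along each spatial axis, halving the volumes in x, y and z while leaving the batch and channel dimensions untouched; the same slicing is applied to the test images, so the saved label_test*.npy predictions are at the halved resolution. A minimal illustration, with the 64-cubed volume size assumed for the example rather than taken from the script:

import numpy as np

images = np.zeros([2, 64, 64, 64, 1], dtype=np.float32)  # hypothetical batch: two 64^3 single-channel volumes
halved = images[:, ::2, ::2, ::2, :]                      # keep every second voxel along x, y and z
print(images.shape, halved.shape)                         # (2, 64, 64, 64, 1) (2, 32, 32, 32, 1)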