Commit 5fbbe004 authored by mathpluscode's avatar mathpluscode

test pipeline works

parent 54d33344
Pipeline #3237 failed with stages
in 59 seconds
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Entry-point script: forwards the command-line arguments (minus the program
# name) to the `test` CLI handler and propagates its return value as the
# process exit code.
import sys
from yfmil3id2019.ui.test_command_line import test
if __name__ == "__main__":
    sys.exit(test(sys.argv[1:]))
......@@ -98,7 +98,7 @@ def extract_image_mask_fnames(folders, has_mask, keep_ratio, skip):
return img_fnames
def load_image_mask(img_path, has_mask, preprocess):
def load_image_mask(img_path, has_mask, preprocess, cut_border):
"""
load image from path and perform basic preprocessing i.e. cut border
:param img_path:
......@@ -113,19 +113,20 @@ def load_image_mask(img_path, has_mask, preprocess):
x = tf.concat([image, mask], axis=-1)
else:
x = image
# cut black border
width, border_width = preprocess['shape']['orig'][1], preprocess['shape']['border']
x = x[:, border_width:(width - border_width), :]
x = resize_image(images=x, size=preprocess['shape']['input'])
if cut_border: # cut black border
width, border_width = preprocess['shape']['orig'][1], preprocess['shape']['border']
x = x[:, border_width:(width - border_width), :]
x = resize_image(images=x, size=preprocess['shape']['input'])
return x
def load_dataset(image_fnames, has_mask, preprocess, num_parallel_calls, cut_border=True):
    """
    Build a tf.data.Dataset that loads (and preprocesses) each image file.

    :param image_fnames: list/array of image file name prefixes, or None
    :param has_mask: if True, load_image_mask also loads the matching mask
    :param preprocess: preprocessing config dict (original/input shapes, border width)
    :param num_parallel_calls: parallelism passed to the map transformation
    :param cut_border: if True the black border is cut before resizing;
                       defaults to True so existing callers keep their behavior
    :return: a tf.data.Dataset of loaded samples, or None when image_fnames is None
    """
    if image_fnames is None:
        return None
    ds = tf.data.Dataset.from_tensor_slices(image_fnames)
    ds = ds.map(lambda x: load_image_mask(x, has_mask, preprocess, cut_border),
                num_parallel_calls=num_parallel_calls)
    return ds
......@@ -191,3 +192,20 @@ def build_dataset(folders_lbl, folders_unlbl, mean_std_path, training, config):
dataset = dataset.map(lambda x: dict(x=x, mean=mean, std=std), num_parallel_calls=num_parallel_calls)
dataset = dataset.prefetch(buffer_size=batch_size)
return dataset
def build_test_dataset(img_fnames, mean_std_path, config):
    """
    Build the dataset used at test/prediction time.

    Images only (no masks) and the black border is NOT cut, so predictions can
    be mapped back onto the original frames.

    :param img_fnames: list of image file name prefixes to predict on
    :param mean_std_path: [path_to_mean_png, path_to_std_png] saved next to the model
    :param config: full config dict (data preprocess, batch size, tf settings)
    :return: a batched, prefetched tf.data.Dataset of dicts with keys x/mean/std
    """
    preprocess = config['data']['preprocess']
    batch_size = config['model']['opt']['batch_size']
    num_parallel_calls = config['tf']['num_parallel_calls']
    dataset = load_dataset(img_fnames, has_mask=False, preprocess=preprocess,
                           num_parallel_calls=num_parallel_calls, cut_border=False)
    # mean/std are stored as PNG images; a single pixel is kept and broadcast
    # against the batch during normalization
    mean = decode_png(mean_std_path[0], channels=3)
    std = decode_png(mean_std_path[1], channels=3)
    mean = tf.expand_dims(mean[:1, :1, :], axis=0)  # shape = [1, 1, 1, 3]
    std = tf.expand_dims(std[:1, :1, :], axis=0)  # shape = [1, 1, 1, 3]
    dataset = dataset.batch(batch_size=batch_size, drop_remainder=False)
    dataset = dataset.map(lambda x: dict(x=x, mean=mean, std=std), num_parallel_calls=num_parallel_calls)
    dataset = dataset.prefetch(buffer_size=batch_size)
    return dataset
import os
import numpy as np
import tensorflow as tf
import cv2
import matplotlib
matplotlib.use('agg')
from matplotlib.image import imsave, imread
from yfmil3id2019.src.model.model import model_fn
from yfmil3id2019.src.util import init_log_dir, make_dir, set_tf_logger
from yfmil3id2019.src.wrapper.util import ConfigProto
from yfmil3id2019.src.data.load import build_test_dataset
def save_predict_results(results, imgs, data_path):
    """
    Save the estimator's prediction outputs as PNG files under data_path/preds/.

    For each sample writes: the normalized input image, the mask (if present),
    the probability map, the thresholded prediction, and the prediction resized
    back to the original image size.

    :param results: iterator of prediction dicts with keys 'images', 'preds'
                    and optionally 'masks'
    :param imgs: list of image file name prefixes, aligned with results
    :param data_path: folder containing the input images; outputs go to a
                      'preds' subfolder
    :return: None
    """
    dir_pred = data_path + '/preds/'
    make_dir(dir_pred)
    preds = list(results)  # iterator to list, to get the predictions
    for sample_id, pred_dict in enumerate(preds):
        # original image size as (width, height), the order cv2.resize expects
        orig = imread(imgs[sample_id] + '.png')
        orig_size = (orig.shape[1], orig.shape[0])
        # get predictions; mask may be absent at test time
        image = pred_dict['images']
        mask = pred_dict.get('masks')
        pred = pred_dict['preds']
        pred_orig = cv2.resize(pred, dsize=orig_size)
        # output
        img_name = imgs[sample_id].split('/')[-1]
        # min-max normalize for visualization; guard against a constant image
        # (max == min) which would otherwise divide by zero
        img_min = np.min(image)
        img_range = np.max(image) - img_min
        if img_range > 0:
            image = (image - img_min) / img_range
        imsave(dir_pred + '/%s_image.png' % img_name, image)
        if mask is not None:
            imsave(dir_pred + '/%s_mask.png' % img_name, mask, vmin=0, vmax=1, cmap='gray')
        imsave(dir_pred + '/%s_prob.png' % img_name, pred, vmin=0, vmax=1, cmap='gray')
        imsave(dir_pred + '/%s_pred.png' % img_name, np.round(pred), vmin=0, vmax=1, cmap='gray')
        imsave(dir_pred + '/%s_pred_orig.png' % img_name, np.round(pred_orig), vmin=0, vmax=1, cmap='gray')
def test_app(config, model_path, data_path, best):
    """
    generate predictions for images under data_path

    :param config: full config dict (dir/log, tf run settings, model params)
    :param model_path: folder of a trained run, containing checkpoints,
                       mean.png/std.png and (if best=True) an export/best subfolder
    :param data_path: folder of .png images to predict on; files ending in
                      'Mask.png' are excluded
    :param best: if True use the exported best checkpoint, otherwise the
                 latest numbered checkpoint in model_path
    :return:
    """
    # create log folder
    app_name = 'test'
    cwd = os.getcwd() + '/'
    dir_log = init_log_dir(cwd + config['dir']['log'], app_name)
    set_tf_logger(dir_log, app_name)
    # init configs; GPU count 0 forces prediction on CPU
    session_config = ConfigProto(device_count={'GPU': 0})
    run_config = tf.estimator.RunConfig(session_config=session_config, **config['tf']['run'])
    # find checkpoint
    dir_run = model_path
    if best:
        # NOTE(review): assumes export/best/ contains exactly one exported
        # checkpoint; files[0] is taken without sorting — confirm upstream
        # export only ever writes one
        best_dir = dir_run + '/export/best/'
        files = [f.path for f in os.scandir(best_dir)]
        # strip the file extension to get the checkpoint prefix tf expects
        ckpt_to_initialize_from = '.'.join(files[0].split('.')[:-1])
    else:
        final_dir = dir_run + '/'
        files = [f.path for f in os.scandir(final_dir)]
        files = [x for x in files if 'ckpt' in x and x.endswith('.index')]
        # checkpoint files look like model.ckpt-<step>.index; sort by step
        files = sorted(files, key=lambda x: int(x.split('.')[-2].split('-')[-1]), reverse=True)  # sort chkpts
        ckpt_to_initialize_from = '.'.join(files[0].split('.')[:-1])  # use the last one
    # get mean and std saved during training, used for input normalization
    mean_std_path = [dir_run + '/mean.png', dir_run + '/std.png']
    # build dataset: every .png except mask files, file extension stripped
    fnames = [f.path for f in os.scandir(data_path)]
    img_fnames = [x[:-4] for x in fnames if x.endswith(".png") and not x.endswith("Mask.png")]

    def test_input_fn():
        # closure so the estimator builds the dataset inside its own graph
        return build_test_dataset(img_fnames, mean_std_path, config)

    # warm-start restores weights from the chosen checkpoint into a fresh estimator
    warm_start_from = tf.estimator.WarmStartSettings(ckpt_to_initialize_from=ckpt_to_initialize_from)
    model = tf.estimator.Estimator(model_fn=model_fn, warm_start_from=warm_start_from, config=run_config, params=config)
    results = model.predict(input_fn=test_input_fn)
    save_predict_results(results=results, imgs=img_fnames, data_path=data_path)
    # TODO calculate metrics if mask is available
import argparse
import logging
import os
import yaml
from yfmil3id2019.ui.test_app import test_app
def test(args=None):
    """
    Command-line entry point: parse arguments, load the run's backed-up config,
    and generate predictions with a trained model.

    :param args: list of CLI argument strings (None lets argparse read sys.argv)
    :return: None
    """
    logging.getLogger("tensorflow").setLevel(logging.FATAL)
    # parse args
    parser = argparse.ArgumentParser(description='yfmil3id2019_eval')
    parser.add_argument('-p',
                        '--model_path',
                        required=True,
                        help='Path of model/log')
    parser.add_argument('-d',
                        '--data_path',
                        required=True,
                        help='Path of images')
    parser.add_argument('-g',
                        '--gpu',
                        required=True,
                        help='GPU ID')
    parser.add_argument('--best',
                        dest='best',
                        action='store_true',
                        help='use the best model instead of the final one, otherwise use the last checkpoint')
    # type=int so a value given on the command line is not left as a string
    # (argparse would otherwise store e.g. '32' and batching would break)
    parser.add_argument('-b',
                        '--bs',
                        default=64,
                        type=int,
                        help='batch size')
    parser.set_defaults(best=False)
    args = parser.parse_args(args)
    # load config; strip a single trailing slash (endswith is safe on an
    # empty string, unlike indexing [-1]) so path concatenation stays clean
    model_path = args.model_path
    if model_path.endswith('/'):
        model_path = model_path[:-1]
    data_path = args.data_path
    if data_path.endswith('/'):
        data_path = data_path[:-1]
    config_path = model_path + '/config_backup.yaml'
    with open(config_path) as file:
        # safe_load: the config is plain data, no need for (unsafe) arbitrary
        # Python object construction that bare yaml.load allows
        config = yaml.safe_load(file)
    # modify config with CLI overrides
    config['model']['opt']['batch_size'] = args.bs
    config['tf']['gpu'] = args.gpu
    os.environ['CUDA_VISIBLE_DEVICES'] = config['tf']['gpu']
    os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
    test_app(config=config, model_path=model_path, data_path=data_path, best=args.best)
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment