#!/usr/bin/env python
#
# file: $ISIP_EXP/tuh_dpath/exp_0086/scripts/decode.py
#
# revision history:
#  20191206 (TE): first version
#
# usage:
#  python decode.py odir mdl_dir data
#
# arguments:
#  odir: the directory where the hypotheses will be stored
#  mdl_dir: input model directory
#  data: the input data list to be decoded
#
# This script decodes data using a simple MLP model.
#------------------------------------------------------------------------------

# import system modules
#
import sys
import os
import numpy as np

# import the model functions: this wildcard import supplies get_data,
# set_seed and the constants SEED, DEF_NUM_FEATS and NEW_LINE used below
#
from model import *

# import tensorflow modules
#
import tensorflow as tf
from tensorflow.keras.models import model_from_json

#------------------------------------------------------------------------------
#
# global variables are listed here
#
#------------------------------------------------------------------------------

# general global values
#
SPACE = " "
MDL_NAME = "model.json"
WGT_NAME = "model.h5"
HYP_EXT = ".hyp"
NUM_ARGS = 3

# for reproducibility, we seed the RNGs
#
set_seed(SEED)

# ensure a single-threaded application to prevent
# non-deterministic operations
#
tf.config.threading.set_inter_op_parallelism_threads(1)
tf.config.threading.set_intra_op_parallelism_threads(1)

#------------------------------------------------------------------------------
#
# the main program starts here
#
#------------------------------------------------------------------------------

# function: main
#
# arguments: none
#
# return: none
#
# This method is the main function.
#
def main(argv):

    # ensure we have the correct number of arguments
    #
    if len(argv) != NUM_ARGS:
        print("usage: python decode.py odir mdl_dir data")
        sys.exit(-1)

    # define local variables
    #
    odir = argv[0]
    mdl_path = argv[1]
    fname = argv[2]
    num_feats = DEF_NUM_FEATS
    if "DL_NUM_FEATS" in os.environ:
        num_feats = int(os.environ["DL_NUM_FEATS"])

    # if the odir doesn't exist, we make it
    #
    if not os.path.exists(odir):
        os.makedirs(odir)

    # get the hyp file name
    #
    hyp_name = os.path.splitext(os.path.basename(fname))[0] + HYP_EXT

    # get a file pointer
    #
    try:
        eval_fp = open(fname, "r")
    except IOError as e:
        print("[%s]: %s" % (fname, e))
        sys.exit(-1)

    # get an array of the data:
    #  data: [[0, 1, ... 26], [27, 28, ...] ...]
    #  labels: [0, 0, 1, ...]
    #
    eval_data, _ = get_data(eval_fp, num_feats)

    # close the file
    #
    eval_fp.close()

    # open the model json file to read
    #
    try:
        json_file = open(os.path.join(mdl_path, MDL_NAME), "r")
    except IOError as e:
        print("[%s]: %s" % (os.path.join(mdl_path, MDL_NAME), e))
        sys.exit(-1)

    # read the content of the json file
    #
    loaded_json_model = json_file.read()

    # close the file
    #
    json_file.close()

    # load the model architecture from the json file
    #
    loaded_model = model_from_json(loaded_json_model)

    # load the weights from the weights file
    #
    loaded_model.load_weights(os.path.join(mdl_path, WGT_NAME))

    # open the output file
    #
    try:
        ofile = open(os.path.join(odir, hyp_name), "w+")
    except IOError as e:
        print("[%s]: %s" % (hyp_name, e))
        sys.exit(-1)
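
    # note: for simplicity, the loop below scores one feature vector per
    # call. if decoding speed matters, the whole evaluation set could be
    # scored in a single batched call instead. a minimal sketch, assuming
    # every vector in eval_data has the same length:
    #
    #  outputs = loaded_model.predict(np.array(eval_data))
    #  hyps = np.argmax(outputs, axis=1)
    #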

    # get the number of data points
    #
    num_points = len(eval_data)

    # for each data point
    #
    for index, data_point in enumerate(eval_data):

        # print an informational message
        #
        print("decoding %4d out of %d" % (index + 1, num_points))

        # pass the input through the model: predict_on_batch returns one
        # row of posteriors for our single-sample batch
        #
        output = list(map(float,
                          loaded_model.predict_on_batch(
                              np.array([data_point]))[0]))

        # write the hypothesis (the index of the highest posterior)
        # followed by the feature vector to the file
        #
        ofile.write(str(output.index(max(output))) + SPACE +
                    SPACE.join([str(point) for point in data_point]) +
                    NEW_LINE)

    # close the file
    #
    ofile.close()

    # exit gracefully
    #
    return True
#
# end of function

# begin gracefully
#
if __name__ == "__main__":
    main(sys.argv[1:])

#
# end of file