## import system modules
#
import os
import sys

## import ML modules
#
import tensorflow as tf
import numpy as np
import pickle
from keras.utils import to_categorical
from sklearn import preprocessing

## Default constants
#
NO_OF_CLASSES = 2
BATCH_SIZE = 32
FEAT_DIM = 26
N_nodes_hl1 = 300
N_nodes_hl2 = 30
N_nodes_hl3 = 30

## This method decodes the test data using a pretrained model.
## Input arguments are the features and one-hot labels in numpy array format,
## the absolute path of the model meta file, and the output hyp file.
#
def dcd_neural_network(test_dat, test_lab, mdl_abs_dir, hyp_file):
    ## collect information related to the trained model
    #
    mdl_name = os.path.basename(mdl_abs_dir)
    mdl_dir = os.path.dirname(mdl_abs_dir)

    ## initialize the session
    #
    with tf.Session() as sess:
        saver = tf.train.import_meta_graph(mdl_abs_dir)            ## load the network
        saver.restore(sess, tf.train.latest_checkpoint(mdl_dir))   ## collect weights from the last checkpoint
        prediction = sess.graph.get_tensor_by_name("op_to_restore:0")  ## get the op which gives us the output probabilities

        ## get a handle on the default graph
        #
        graph = tf.get_default_graph()

        ## collect input/output placeholders
        #
        x = graph.get_tensor_by_name("input:0")
        y_ = graph.get_tensor_by_name("output:0")

        feed_dict = {x: test_dat}   ## test data

        ## run prediction
        #
        cls_pred = sess.run(prediction, feed_dict=feed_dict)
    ## end of session
    #

    ## calculate the accuracy of the detected results
    #
    with tf.name_scope("prediction_acc"):
        correct_pred = tf.equal(tf.argmax(test_lab, 1), tf.argmax(cls_pred, 1))
    with tf.name_scope("accuracy"):
        accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

    ## print the accuracy and collect the boolean results; the booleans are not
    ## strictly necessary but are used to generate the output file
    #
    with tf.Session() as sess:
        acc, bool_pred = sess.run([accuracy, correct_pred])
        print(acc)

    ## write the hyp file
    #
    write_ofile(hyp_file, bool_pred)
## end of method
#

## write predicted labels in the output file
#
def write_ofile(ofile, pred):
    with open(ofile, 'w') as fout:   ## text mode: we write strings, not bytes
        for ele in pred:
            if ele:
                fout.write("1\n")
            else:
                fout.write("0\n")

## collecting labels and features from the input file;
## each line holds an integer class label followed by the float features
#
def extract_data(fp_a):
    dat = readflines(fp_a)

    ## initialize the feature and label lists
    #
    feats = []
    labs = []

    ## collect all the features and labels
    #
    for line in dat:
        l_fields = line.split()

        ## convert strings to int/float32 datatypes
        #
        feats_m = list(map(float, l_fields[1:]))   ## list() so numpy sees the values, not a map object
        labs_m = int(l_fields[0])                  ## the whole first field is the label
        feats.append(feats_m)
        labs.append(labs_m)

    feats = np.asarray(feats)
    labs = np.asarray(labs)

    ## return feats and labels as a tuple
    #
    return (feats, labs)

## This method reads the lines of a file and returns them as a list
#
def readflines(list_a):
    with open(list_a, 'r') as fl:
        return fl.read().splitlines()

## create a directory tree if the path doesn't exist
#
def create_dirtree(dirtree_a):
    if not os.path.exists(dirtree_a):
        os.makedirs(dirtree_a)

def main():
    labfeatslist = "../exam_dat/dev.txt"
    mdl_abs_dir = "../tf_output/mlp_mdl.meta"
    normalize_f = True
    hyp_file = "../tf_hyp.txt"

    ## collect decoding data
    #
    feats, labs = extract_data(labfeatslist)

    ## normalize features
    #
    if normalize_f:
        max_abs_scalar = preprocessing.MaxAbsScaler()
        max_abs_scalar.fit(feats)
        feats = max_abs_scalar.transform(feats)

    ## do one-hot coding of the labels. This is necessary when using a softmax
    ## output layer
    #
    labs_cat = to_categorical(labs, num_classes=NO_OF_CLASSES)

    ## decode data using the trained network; pass the one-hot labels so the
    ## accuracy computation can take tf.argmax along axis 1
    #
    dcd_neural_network(feats, labs_cat, mdl_abs_dir, hyp_file)
## end of main
#

if __name__ == "__main__":
    main()
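## ---------------------------------------------------------------------------
## Reference notes. These are assumptions inferred from the code above, not
## part of the original pipeline.
##
## 1) Expected format of the input list file, inferred from extract_data():
##    one sample per line, an integer class label first, then the float
##    features (FEAT_DIM = 26 suggests 26 of them), e.g.
##
##      1 0.132 -0.441 ... 0.078
##      0 0.004  0.912 ... -0.330
##
## 2) The decoder can only look tensors up by name if the training script
##    named them accordingly. A minimal, hypothetical sketch of the naming
##    this script assumes when it asks for "input:0", "output:0" and
##    "op_to_restore:0":
##
##      x  = tf.placeholder(tf.float32, [None, FEAT_DIM],      name="input")
##      y_ = tf.placeholder(tf.float32, [None, NO_OF_CLASSES], name="output")
##      pred = tf.nn.softmax(logits, name="op_to_restore")
##      tf.train.Saver().save(sess, "../tf_output/mlp_mdl")
## ---------------------------------------------------------------------------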