import argparse
import os

from metrics import PromptDSTEvaluator


def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument('-o', '--output_file', type=str, required=True,
                        help="The path of the outputs JSON file.")

    # Parse the arguments; argparse exits with an error if --output_file
    # is missing, so no separate None check is needed
    args = parser.parse_args()

    # Assertion check for file availability
    assert os.path.isfile(args.output_file), \
        f"Output file not found: {args.output_file}"

    # Create an evaluator instance for prompt-based DST
    evaluator = PromptDSTEvaluator(args.output_file)

    # Compute Joint Goal Accuracy
    evaluator.compute_joint_goal_accuracy()

    # Compute JGA for the values that are correctly extracted in a turn
    evaluator.compute_jga_for_correct_values()


if __name__ == "__main__":
    main()
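
# Example invocation (a minimal sketch; the script filename "evaluate.py" and
# the predictions path "outputs/predictions.json" are illustrative, not taken
# from the original source):
#
#   python evaluate.py --output_file outputs/predictions.json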