diff --git a/classifier/nn.py b/classifier/nn.py
index 8550ccb..1f0f51e 100644
--- a/classifier/nn.py
+++ b/classifier/nn.py
@@ -104,10 +104,10 @@ class BiLstmClassifier(Model):
         output_dict['probabilities'] = class_probabilities
         output_dict['positive_label'] = label
         output_dict['prediction'] = label
-        citation_text = []
-        for batch_text in output_dict['tokens']:
-            citation_text.append([self.vocab.get_token_from_index(token_id.item()) for token_id in batch_text])
-        output_dict['tokens'] = citation_text
+        # citation_text = []
+        # for batch_text in output_dict['tokens']:
+        #     citation_text.append([self.vocab.get_token_from_index(token_id.item()) for token_id in batch_text])
+        # output_dict['tokens'] = citation_text
         return output_dict
diff --git a/configs/basic_model.json b/configs/basic_model.json
index 55fac2a..e802261 100644
--- a/configs/basic_model.json
+++ b/configs/basic_model.json
@@ -49,9 +49,9 @@
   "trainer": {
     "optimizer": {
       "type": "adagrad",
-      "lr": 0.001
+      "lr": 0.005
     },
-    "num_epochs": 20,
+    "num_epochs": 10,
     "cuda_device": 3
   }
 }
diff --git a/testing/intent_predictor.py b/testing/intent_predictor.py
index d1541d3..f7574cb 100644
--- a/testing/intent_predictor.py
+++ b/testing/intent_predictor.py
@@ -41,8 +41,9 @@ def make_predictions(model: Model, dataset_reader: DatasetReader, file_path: str
         i += 1
         true_list.append(citation.intent)
         output = predictor.predict(citation.text, citation.intent)
-        prediction_list.append({vocab.get_token_from_index(label_id, 'labels'): prob
-                                for label_id, prob in enumerate(output['probs'])})
+        prediction_list.append(output['prediction'])
+        # prediction_list.append({vocab.get_token_from_index(label_id, 'labels'): prob
+        #                         for label_id, prob in enumerate(output['probabilities'])})
         if i == 10:
             break
@@ -59,4 +60,5 @@ def load_model_and_run_predictions(saved_model_dir: str):
     y_pred, y_true = make_predictions(model_archive.model, citation_dataset_reader, test_file_path)
-    print(y_pred)
+    print('Predictions ', y_pred)
+    print('True Labels ', y_true)
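
Not part of the patch, just a sketch for context: with the change above, make_predictions now appends plain label strings (output['prediction']) to y_pred alongside the true intents in y_true, so the two lists can be scored directly instead of only printed. The snippet below assumes y_pred and y_true are parallel lists of intent label strings and that scikit-learn is available; the evaluate_predictions helper name is hypothetical.

from sklearn.metrics import classification_report, f1_score

def evaluate_predictions(y_pred, y_true):
    # Per-class precision/recall/F1 plus an overall macro-F1 for the intent labels.
    print(classification_report(y_true, y_pred, zero_division=0))
    print('Macro F1 ', f1_score(y_true, y_pred, average='macro'))

# Usage with the lists printed at the end of load_model_and_run_predictions:
# y_pred, y_true = make_predictions(model_archive.model, citation_dataset_reader, test_file_path)
# evaluate_predictions(y_pred, y_true)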