diff --git a/classifier/linear_model.py b/classifier/linear_model.py
index 5c86f92..93cd669 100644
--- a/classifier/linear_model.py
+++ b/classifier/linear_model.py
@@ -212,8 +212,8 @@ def get_sample_weights_with_features(theta_bias: float = None, random_state: int
     :return: returns a dictionary of random weights for each feature
     """
     weights = {THETA_BIAS_FEATURE: theta_bias}
-    random.seed(random_state)
     for feature in FEATURE_LIST:
+        random.seed(random_state)
         weights[feature] = round(random.uniform(-1.0, 1.0), 5)
     return weights

diff --git a/eval/metrics.py b/eval/metrics.py
index 9719648..f844a3d 100644
--- a/eval/metrics.py
+++ b/eval/metrics.py
@@ -177,4 +177,9 @@ class Result:

     def print_result(self):
         """ Prints F1 Score"""
-        print('F1 Score :: ', self.f1_score, ' Label :: ', self.label, ' Average :: ', self.average)
+        print_line = 'F1 Score :: ' + str(self.f1_score)
+        if self.label:
+            print_line += ' Label :: ' + self.label
+        if self.average:
+            print_line += ' Average :: ' + self.average
+        print(print_line)
diff --git a/presentation/presentation.pdf b/presentation/presentation.pdf
index 01b66fb..b42965c 100644
Binary files a/presentation/presentation.pdf and b/presentation/presentation.pdf differ
diff --git a/presentation/presentation.tex b/presentation/presentation.tex
index fed11b5..380c43d 100644
--- a/presentation/presentation.tex
+++ b/presentation/presentation.tex
@@ -98,7 +98,6 @@ Base Classifier: {\bf {\color{red} Perceptron}}
 \item Linear Classifier
 \item Binary Classifier
 \end{itemize}
-\bigskip
 \begin{minted}[autogobble, breaklines,breakanywhere, fontfamily=helvetica, fontsize=\small]{python}
 class Perceptron:
@@ -112,6 +111,8 @@ class MultiClassPerceptron:
     def predict(self, X_test: list)
 \end{minted}
+\bigskip
+- {\bf Parameters} and {\bf Hyperparameters}
 \end{frame}
@@ -134,6 +135,7 @@ Lexicons and Regular Expressions ($\approx$ 30 Features)
 .....
 }
 \end{minted}
+\bigskip
 \item REGEX
 \begin{itemize}
 \item $ACRONYM$
@@ -185,13 +187,13 @@ Lexicons and Regular Expressions ($\approx$ 30 Features)
 \begin{tabular}{| l | c | c |}
 \hline
 {\bf Averaging} & {\bf Score} \\ \hline \hline
-MACRO & 0.59 \\
+MICRO & 0.64 \\
 \hline
-MICRO & 0.57 \\
+MACRO & 0.57 \\
 \hline
-background & 0.63 \\
-method & 0.48 \\
-result & 0.55 \\
+background & 0.72 \\
+method & 0.54 \\
+result & 0.46 \\
 \hline
 \end{tabular}}
diff --git a/testing/model_testing.py b/testing/model_testing.py
index aa10bee..18bbc41 100644
--- a/testing/model_testing.py
+++ b/testing/model_testing.py
@@ -18,10 +18,10 @@ labels = set([inst.true_label for inst in X_train_inst])
 X_test_inst = read_csv_file(test_file_path, '\t')

 # number of training iterations
-epochs = int(len(X_train_inst)*0.9)
+epochs = int(len(X_train_inst)*1.5)

 # create MultiClassPerceptron classifier object
-clf = MultiClassPerceptron(epochs=epochs, learning_rate=0.9, random_state=42)
+clf = MultiClassPerceptron(epochs=epochs, learning_rate=0.75, random_state=101)

 # train the model
 clf.fit(X_train=X_train_inst, labels=list(labels))
@@ -34,9 +34,9 @@ y_true = [inst.true_label for inst in X_test_inst]

 # Model Evaluation
 f1_score_micro = f1_score(y_true, y_test, labels, const.AVG_MICRO)
-# f1_score_macro = f1_score(y_true, y_test, labels, const.AVG_MACRO)
-# f1_score_none = f1_score(y_true, y_test, labels, None)
+f1_score_macro = f1_score(y_true, y_test, labels, const.AVG_MACRO)
+f1_score_none = f1_score(y_true, y_test, labels, None)

 # Print F1 Score
-for result in f1_score_micro:
+for result in f1_score_micro + f1_score_macro + f1_score_none:
     result.print_result()