Prepare for experiments with different inverse prompt weights

Branch: main
Pavan Mandava committed 3 years ago
parent 21fd9cac84
commit aacf5fd181

@@ -17,7 +17,7 @@ PROMPT_TEMPLATES = {
"generate": "belief states: value = $value, slot ="
},
"inverse-prompt": {
"training": INVERSE_PROMPTS["i2"],
"training": INVERSE_PROMPTS["i1"],
},
"prompt-ensemble": {
"training": {

@@ -51,5 +51,4 @@ mkdir -p "${OUTPUTS_DIR}"
python prompt_decode.py \
--output_dir="${OUTPUTS_DIR}" \
--tuned_model_path="${FINE_TUNED_MODEL_PATH}" \
--test_data_file="${TEST_DATA_FILE}" \
--with_prompt_ensemble
--test_data_file="${TEST_DATA_FILE}"

@@ -50,11 +50,7 @@ echo "Trained Models (epochs) will be saved in ${SAVE_DIR}"
# different number of epochs for different training sets
# when using prompt ensemble for training, preferably use a larger number of epochs.
if [ "$data_split" = "5-dpd" ] || [ "$data_split" = "10-dpd" ]; then
epochs=5
else
epochs=8
fi
python prompt_train.py \
--save_model_dir="${SAVE_DIR}" \
@@ -63,6 +59,5 @@ python prompt_train.py \
--validation_file=../data/prompt-learning/valid/valid.soloist.json \
--num_epochs $epochs \
--learning_rate 5e-5 \
- --with_prompt_ensemble \
--with_inverse_prompt \
- --inverse_prompt_weight 0.1
+ --inverse_prompt_weight 0.3
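
To run the weight experiments this commit prepares for, the training call can be wrapped in a small sweep. The sketch below is a non-authoritative illustration: it reuses only the flags visible in the diff above, the weight values and SAVE_DIR layout are placeholders, and the real prompt_train.py invocation likely takes additional arguments (e.g. the training data file) that are not shown here.

# Hypothetical sweep over inverse prompt weights (placeholder values).
epochs=5   # mirrors the script's setting for the 5-dpd / 10-dpd splits
for w in 0.1 0.3; do
  SAVE_DIR="models/inverse-weight-${w}"   # assumed output layout
  mkdir -p "${SAVE_DIR}"
  python prompt_train.py \
    --save_model_dir="${SAVE_DIR}" \
    --validation_file=../data/prompt-learning/valid/valid.soloist.json \
    --num_epochs $epochs \
    --learning_rate 5e-5 \
    --with_inverse_prompt \
    --inverse_prompt_weight "${w}"
done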