From 7109c43e9673c9c58b8ee2f29e3edd5d2ed52891 Mon Sep 17 00:00:00 2001 From: Gaofeng Cheng <770579626@qq.com> Date: Mon, 12 Dec 2016 21:06:05 +0800 Subject: [PATCH 1/2] change dropout_parser strategy --- .../local/chain/tuning/run_tdnn_lstm_1i_dp.sh | 293 ++++++++++++++++++ egs/wsj/s5/steps/libs/nnet3/train/common.py | 16 +- egs/wsj/s5/steps/libs/nnet3/xconfig/lstm.py | 25 +- 3 files changed, 325 insertions(+), 9 deletions(-) create mode 100644 egs/ami/s5b/local/chain/tuning/run_tdnn_lstm_1i_dp.sh diff --git a/egs/ami/s5b/local/chain/tuning/run_tdnn_lstm_1i_dp.sh b/egs/ami/s5b/local/chain/tuning/run_tdnn_lstm_1i_dp.sh new file mode 100644 index 00000000000..ff083875835 --- /dev/null +++ b/egs/ami/s5b/local/chain/tuning/run_tdnn_lstm_1i_dp.sh @@ -0,0 +1,293 @@ +#!/bin/bash + +# same as 1g but with TDNN output dim 1024 instead of 512 +# (num-params 1g:21309812 1i: 43447156) +# results on sdm1 using ihm ali +#System tdnn_lstm1g_sp_bi_ihmali_ld5 tdnn_lstm1i_sp_bi_ihmali_ld5 +#WER on dev 38.3 37.6 +#WER on eval 41.6 40.9 +#Final train prob -0.138017 -0.114135 +#Final valid prob -0.238659 -0.245208 +#Final train prob (xent) -1.66834 -1.47648 +#Final valid prob (xent) -2.17419 -2.16365 + + +set -e -o pipefail + +# First the options that are passed through to run_ivector_common.sh +# (some of which are also used in this script directly). +stage=0 +mic=ihm +nj=30 +min_seg_len=1.55 +use_ihm_ali=false +train_set=train_cleaned +gmm=tri3_cleaned # the gmm for the target data +ihm_gmm=tri3 # the gmm for the IHM system (if --use-ihm-ali true). +num_threads_ubm=32 +nnet3_affix=_cleaned # cleanup affix for nnet3 and chain dirs, e.g. _cleaned +dropout_schedule= +chunk_width=150 +chunk_left_context=40 +chunk_right_context=0 +label_delay=5 +# The rest are configs specific to this script. Most of the parameters +# are just hardcoded at this level, in the commands below. +train_stage=-10 +tree_affix= # affix for tree directory, e.g. "a" or "b", in case we change the configuration. +tlstm_affix=1i #affix for TDNN-LSTM directory, e.g. "a" or "b", in case we change the configuration. +common_egs_dir= # you can set this to use previously dumped egs. + + +# decode options +extra_left_context=50 +frames_per_chunk= + +# End configuration section. +echo "$0 $@" # Print the command line for logging + +. ./cmd.sh +. ./path.sh +. ./utils/parse_options.sh + + +if ! cuda-compiled; then + cat <data/lang_chain/topo + fi +fi + +if [ $stage -le 13 ]; then + # Get the alignments as lattices (gives the chain training more freedom). + # use the same num-jobs as the alignments + steps/align_fmllr_lats.sh --nj 100 --cmd "$train_cmd" ${lores_train_data_dir} \ + data/lang $gmm_dir $lat_dir + rm $lat_dir/fsts.*.gz # save space +fi + +if [ $stage -le 14 ]; then + # Build a tree using our new topology. We know we have alignments for the + # speed-perturbed data (local/nnet3/run_ivector_common.sh made them), so use + # those. + if [ -f $tree_dir/final.mdl ]; then + echo "$0: $tree_dir/final.mdl already exists, refusing to overwrite it." 
+ exit 1; + fi + steps/nnet3/chain/build_tree.sh --frame-subsampling-factor 3 \ + --context-opts "--context-width=2 --central-position=1" \ + --leftmost-questions-truncate -1 \ + --cmd "$train_cmd" 4200 ${lores_train_data_dir} data/lang_chain $ali_dir $tree_dir +fi + +xent_regularize=0.1 + +if [ $stage -le 15 ]; then + echo "$0: creating neural net configs using the xconfig parser"; + + num_targets=$(tree-info $tree_dir/tree |grep num-pdfs|awk '{print $2}') + learning_rate_factor=$(echo "print 0.5/$xent_regularize" | python) + + mkdir -p $dir/configs + cat < $dir/configs/network.xconfig + input dim=100 name=ivector + input dim=40 name=input + + # please note that it is important to have input layer with the name=input + # as the layer immediately preceding the fixed-affine-layer to enable + # the use of short notation for the descriptor + fixed-affine-layer name=lda input=Append(-1,0,1,ReplaceIndex(ivector, t, 0)) affine-transform-file=$dir/configs/lda.mat + + # the first splicing is moved before the lda layer, so no splicing here + relu-renorm-layer name=tdnn1 dim=1024 + relu-renorm-layer name=tdnn2 input=Append(-1,0,1) dim=1024 + relu-renorm-layer name=tdnn3 input=Append(-1,0,1) dim=1024 + + # check steps/libs/nnet3/xconfig/lstm.py for the other options and defaults + lstmp-layer name=lstm1 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 + relu-renorm-layer name=tdnn4 input=Append(-3,0,3) dim=1024 + relu-renorm-layer name=tdnn5 input=Append(-3,0,3) dim=1024 + relu-renorm-layer name=tdnn6 input=Append(-3,0,3) dim=1024 + lstmp-layer name=lstm2 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 + relu-renorm-layer name=tdnn7 input=Append(-3,0,3) dim=1024 + relu-renorm-layer name=tdnn8 input=Append(-3,0,3) dim=1024 + relu-renorm-layer name=tdnn9 input=Append(-3,0,3) dim=1024 + lstmp-layer name=lstm3 cell-dim=1024 recurrent-projection-dim=256 non-recurrent-projection-dim=256 delay=-3 dropout-proportion=0.0 + + ## adding the layers for chain branch + output-layer name=output input=lstm3 output-delay=$label_delay include-log-softmax=false dim=$num_targets max-change=1.5 + + # adding the layers for xent branch + # This block prints the configs for a separate output that will be + # trained with a cross-entropy objective in the 'chain' models... this + # has the effect of regularizing the hidden parts of the model. we use + # 0.5 / args.xent_regularize as the learning rate factor- the factor of + # 0.5 / args.xent_regularize is suitable as it means the xent + # final-layer learns at a rate independent of the regularization + # constant; and the 0.5 was tuned so as to make the relative progress + # similar in the xent and regular final layers. + output-layer name=output-xent input=lstm3 output-delay=$label_delay dim=$num_targets learning-rate-factor=$learning_rate_factor max-change=1.5 + +EOF + + steps/nnet3/xconfig_to_configs.py --xconfig-file $dir/configs/network.xconfig --config-dir $dir/configs/ +fi + +if [ $stage -le 16 ]; then + if [[ $(hostname -f) == *.clsp.jhu.edu ]] && [ ! 
-d $dir/egs/storage ]; then + utils/create_split_dir.pl \ + /export/b0{5,6,7,8}/$USER/kaldi-data/egs/ami-$(date +'%m_%d_%H_%M')/s5b/$dir/egs/storage $dir/egs/storage + fi + + steps/nnet3/chain/train.py --stage $train_stage \ + --cmd "$decode_cmd" \ + --feat.online-ivector-dir $train_ivector_dir \ + --feat.cmvn-opts "--norm-means=false --norm-vars=false" \ + --chain.xent-regularize $xent_regularize \ + --chain.leaky-hmm-coefficient 0.1 \ + --chain.l2-regularize 0.00005 \ + --chain.apply-deriv-weights false \ + --chain.lm-opts="--num-extra-lm-states=2000" \ + --egs.dir "$common_egs_dir" \ + --egs.opts "--frames-overlap-per-eg 0" \ + --egs.chunk-width $chunk_width \ + --egs.chunk-left-context $chunk_left_context \ + --egs.chunk-right-context $chunk_right_context \ + --trainer.dropout-schedule $dropout_schedule \ + --trainer.num-chunk-per-minibatch 64 \ + --trainer.frames-per-iter 1500000 \ + --trainer.num-epochs 4 \ + --trainer.optimization.shrink-value 0.99 \ + --trainer.optimization.num-jobs-initial 2 \ + --trainer.optimization.num-jobs-final 12 \ + --trainer.optimization.initial-effective-lrate 0.001 \ + --trainer.optimization.final-effective-lrate 0.0001 \ + --trainer.max-param-change 2.0 \ + --trainer.deriv-truncate-margin 8 \ + --cleanup.remove-egs true \ + --feat-dir $train_data_dir \ + --tree-dir $tree_dir \ + --lat-dir $lat_dir \ + --dir $dir +fi + + +graph_dir=$dir/graph_${LM} +if [ $stage -le 17 ]; then + # Note: it might appear that this data/lang_chain directory is mismatched, and it is as + # far as the 'topo' is concerned, but this script doesn't read the 'topo' from + # the lang directory. + utils/mkgraph.sh --left-biphone --self-loop-scale 1.0 data/lang_${LM} $dir $graph_dir +fi + +if [ $stage -le 18 ]; then + rm $dir/.error 2>/dev/null || true + + [ -z $extra_left_context ] && extra_left_context=$chunk_left_context; + [ -z $frames_per_chunk ] && frames_per_chunk=$chunk_width; + + for decode_set in dev eval; do + ( + steps/nnet3/decode.sh --acwt 1.0 --post-decode-acwt 10.0 \ + --nj $nj --cmd "$decode_cmd" \ + --extra-left-context $extra_left_context \ + --frames-per-chunk "$frames_per_chunk" \ + --online-ivector-dir exp/$mic/nnet3${nnet3_affix}/ivectors_${decode_set}_hires \ + --scoring-opts "--min-lmwt 5 " \ + $graph_dir data/$mic/${decode_set}_hires $dir/decode_${decode_set} || exit 1; + ) || touch $dir/.error & + done + wait + if [ -f $dir/.error ]; then + echo "$0: something went wrong in decoding" + exit 1 + fi +fi +exit 0 diff --git a/egs/wsj/s5/steps/libs/nnet3/train/common.py b/egs/wsj/s5/steps/libs/nnet3/train/common.py index 7009b1cba6e..6c73d417009 100644 --- a/egs/wsj/s5/steps/libs/nnet3/train/common.py +++ b/egs/wsj/s5/steps/libs/nnet3/train/common.py @@ -395,8 +395,8 @@ def _parse_dropout_string(num_archives_to_process, dropout_str): "at least the start and end dropouts") # Starting dropout proportion - dropout_values.append((0, float(parts[0]))) - + dropout_values.append((0, float(parts[0]))) + data_fraction_one_previous='' # used to control situations like: 0.2@0.75,0@0.75 for i in range(1, len(parts) - 1): value_x_pair = parts[i].split('@') if len(value_x_pair) == 1: @@ -407,9 +407,15 @@ def _parse_dropout_string(num_archives_to_process, dropout_str): else: assert len(value_x_pair) == 2 dropout_proportion, data_fraction = value_x_pair - dropout_values.append( - (float(data_fraction) * num_archives_to_process, - float(dropout_proportion))) + if data_fraction == data_fraction_one_previous : + dropout_values.append( + (float(data_fraction) * 
num_archives_to_process + 1.0, + float(dropout_proportion))) + else: + dropout_values.append( + (float(data_fraction) * num_archives_to_process, + float(dropout_proportion))) + _, data_fraction_one_previous = value_x_pair dropout_values.append((num_archives_to_process, float(parts[-1]))) except Exception: diff --git a/egs/wsj/s5/steps/libs/nnet3/xconfig/lstm.py b/egs/wsj/s5/steps/libs/nnet3/xconfig/lstm.py index 1ac860ffa9c..023eece93da 100644 --- a/egs/wsj/s5/steps/libs/nnet3/xconfig/lstm.py +++ b/egs/wsj/s5/steps/libs/nnet3/xconfig/lstm.py @@ -249,7 +249,8 @@ def set_default_configs(self): 'ng-affine-options' : ' max-change=0.75 ', 'self-repair-scale-nonlinearity' : 0.00001, 'zeroing-interval' : 20, - 'zeroing-threshold' : 15.0 + 'zeroing-threshold' : 15.0, + 'dropout-proportion' : -1.0 # -1.0 stands for no dropout will be added } def set_derived_configs(self): @@ -280,6 +281,11 @@ def check_configs(self): raise xparser_error("{0} has invalid value {2}.".format(self.layer_type, key, self.config[key])) + if ((self.config['dropout-proportion'] > 1.0 or + self.config['dropout-proportion'] < 0.0) and + self.config['dropout-proportion'] != -1.0 ): + raise xparser_error("dropout-proportion has invalid value {0}.".format(self.config['dropout-proportion'])) + def auxiliary_outputs(self): return ['c_t'] @@ -339,6 +345,8 @@ def generate_lstm_config(self): abs(delay))) affine_str = self.config['ng-affine-options'] pes_str = self.config['ng-per-element-scale-options'] + lstm_dropout_value = self.config['dropout-proportion'] + lstm_dropout_str = 'dropout-proportion='+str(self.config['dropout-proportion']) # Natural gradient per element scale parameters # TODO: decide if we want to keep exposing these options @@ -418,13 +426,21 @@ def generate_lstm_config(self): # add the recurrent connections configs.append("# projection matrices : Wrm and Wpm") + if lstm_dropout_value != -1.0: + configs.append("component name={0}.W_rp.m.dropout type=DropoutComponent dim={1} {2}".format(name, cell_dim, lstm_dropout_str)) configs.append("component name={0}.W_rp.m type=NaturalGradientAffineComponent input-dim={1} output-dim={2} {3}".format(name, cell_dim, rec_proj_dim + nonrec_proj_dim, affine_str)) configs.append("component name={0}.r type=BackpropTruncationComponent dim={1} {2}".format(name, rec_proj_dim, bptrunc_str)) configs.append("# r_t and p_t : rp_t will be the output") - configs.append("component-node name={0}.rp_t component={0}.W_rp.m input={0}.m_t".format(name)) - configs.append("dim-range-node name={0}.r_t_preclip input-node={0}.rp_t dim-offset=0 dim={1}".format(name, rec_proj_dim)) - configs.append("component-node name={0}.r_t component={0}.r input={0}.r_t_preclip".format(name)) + if lstm_dropout_value != -1.0: + configs.append("component-node name={0}.rp_t.dropout component={0}.W_rp.m.dropout input={0}.m_t".format(name)) + configs.append("component-node name={0}.rp_t component={0}.W_rp.m input={0}.rp_t.dropout".format(name)) + configs.append("dim-range-node name={0}.r_t_preclip input-node={0}.rp_t dim-offset=0 dim={1}".format(name, rec_proj_dim)) + configs.append("component-node name={0}.r_t component={0}.r input={0}.r_t_preclip".format(name)) + else: + configs.append("component-node name={0}.rp_t component={0}.W_rp.m input={0}.m_t".format(name)) + configs.append("dim-range-node name={0}.r_t_preclip input-node={0}.rp_t dim-offset=0 dim={1}".format(name, rec_proj_dim)) + configs.append("component-node name={0}.r_t component={0}.r input={0}.r_t_preclip".format(name)) return configs @@ -745,6 +761,7 @@ def 
set_default_configs(self): 'ng-affine-options' : ' max-change=1.5', 'zeroing-interval' : 20, 'zeroing-threshold' : 15.0 + } def set_derived_configs(self): From 5435f236edb09a864e8a4c9847bf63588f5f8693 Mon Sep 17 00:00:00 2001 From: Gaofeng Cheng <770579626@qq.com> Date: Wed, 14 Dec 2016 22:00:00 +0800 Subject: [PATCH 2/2] adding frame level dropout to TDNN+LSTM on AMI SDM1 #1248 --- .../local/chain/tuning/run_tdnn_lstm_1i_dp.sh | 20 ++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/egs/ami/s5b/local/chain/tuning/run_tdnn_lstm_1i_dp.sh b/egs/ami/s5b/local/chain/tuning/run_tdnn_lstm_1i_dp.sh index ff083875835..ea7c01b79ec 100644 --- a/egs/ami/s5b/local/chain/tuning/run_tdnn_lstm_1i_dp.sh +++ b/egs/ami/s5b/local/chain/tuning/run_tdnn_lstm_1i_dp.sh @@ -1,15 +1,17 @@ #!/bin/bash +# Copyright 2016 University of Chinese Academy of Sciences (Author: Cheng Gaofeng) +# Apache 2.0 -# same as 1g but with TDNN output dim 1024 instead of 512 +# same as 1i but with frame level dropout # (num-params 1g:21309812 1i: 43447156) # results on sdm1 using ihm ali -#System tdnn_lstm1g_sp_bi_ihmali_ld5 tdnn_lstm1i_sp_bi_ihmali_ld5 -#WER on dev 38.3 37.6 -#WER on eval 41.6 40.9 -#Final train prob -0.138017 -0.114135 -#Final valid prob -0.238659 -0.245208 -#Final train prob (xent) -1.66834 -1.47648 -#Final valid prob (xent) -2.17419 -2.16365 +#System tdnn_lstm1i_sp_bi_ihmali_ld5 +#WER on dev 37.6 36.7 +#WER on eval 40.9 39.9 +#Final train prob -0.114135 -0.118 +#Final valid prob -0.245208 -0.246 +#Final train prob (xent) -1.47648 -1.54 +#Final valid prob (xent) -2.16365 -2.10 set -e -o pipefail @@ -26,7 +28,7 @@ gmm=tri3_cleaned # the gmm for the target data ihm_gmm=tri3 # the gmm for the IHM system (if --use-ihm-ali true). num_threads_ubm=32 nnet3_affix=_cleaned # cleanup affix for nnet3 and chain dirs, e.g. _cleaned -dropout_schedule= +dropout_schedule='0,0@0.20,0.5@0.50,0@0.50,0' chunk_width=150 chunk_left_context=40 chunk_right_context=0
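
Note on the dropout schedule used above: a string such as '0,0@0.20,0.5@0.50,0@0.50,0' is interpreted as a piecewise-linear dropout proportion over the fraction of training data processed, and the common.py change in this patch makes a repeated data fraction (here 0.5@0.50 followed by 0@0.50) act as a step change instead of a degenerate segment. The sketch below is a minimal, standalone illustration of that interpretation only; it is not the steps/libs/nnet3/train/common.py code, and the helper names (parse_schedule, dropout_at) and the 1e-6 nudge standing in for the "+ 1.0 archive" offset are illustrative assumptions.

# Illustrative sketch (not the Kaldi implementation): interpret a dropout
# schedule such as '0,0@0.20,0.5@0.50,0@0.50,0' as a piecewise-linear
# function of the fraction of training data processed so far.

def parse_schedule(schedule_str):
    """Return a list of (data_fraction, dropout_proportion) breakpoints."""
    parts = schedule_str.split(',')
    points = [(0.0, float(parts[0]))]        # schedule starts at data fraction 0.0
    prev_fraction = 0.0
    for part in parts[1:-1]:
        if '@' not in part:
            raise ValueError("this sketch expects explicit value@fraction pairs")
        value, fraction = part.split('@')
        fraction = float(fraction)
        # A repeated fraction (e.g. 0.5@0.50 followed by 0@0.50) marks a step
        # change: nudge the second point slightly to the right so the function
        # stays single-valued, mirroring the one-archive offset in the patch.
        if fraction <= prev_fraction:
            fraction = prev_fraction + 1e-6
        points.append((fraction, float(value)))
        prev_fraction = fraction
    points.append((1.0, float(parts[-1])))   # schedule ends at data fraction 1.0
    return points

def dropout_at(points, data_fraction):
    """Linearly interpolate the dropout proportion at a given data fraction."""
    for (x0, y0), (x1, y1) in zip(points, points[1:]):
        if x0 <= data_fraction <= x1:
            if x1 == x0:
                return y1
            return y0 + (y1 - y0) * (data_fraction - x0) / (x1 - x0)
    return points[-1][1]

if __name__ == '__main__':
    pts = parse_schedule('0,0@0.20,0.5@0.50,0@0.50,0')
    for frac in (0.0, 0.2, 0.35, 0.5, 0.75, 1.0):
        print(frac, round(dropout_at(pts, frac), 3))
    # Expected shape: 0 until 20% of the data, ramp up to 0.5 at 50%,
    # then drop back to 0 and stay there for the rest of training.

Under these assumptions, the schedule in the second commit turns dropout off for the first 20% of training, ramps it linearly to 0.5 by the halfway point, and then disables it for the remainder, which is the behaviour the modified _parse_dropout_string is meant to express.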