diff --git a/lit_nlp/client/modules/lm_salience_module.css b/lit_nlp/client/modules/lm_salience_module.css
index 22192870..49a0c84b 100644
--- a/lit_nlp/client/modules/lm_salience_module.css
+++ b/lit_nlp/client/modules/lm_salience_module.css
@@ -7,10 +7,6 @@
   padding: 8px;
 }
 
-.chip-container-dense {
-  padding: 8px;
-}
-
 .pre-wrap {
   white-space: pre-wrap;
 }
@@ -23,6 +19,7 @@
   white-space: nowrap;
   text-overflow: ellipsis;
   overflow-x: hidden;
+  line-height: 22px;
 }
 
 lit-switch .icon-button {
@@ -63,12 +60,10 @@ lit-switch .icon-button {
   margin-right: 8px;
 }
 
-.controls-group-variable > label {
-  min-width: 45px;
-}
-
 .controls-group-variable .dropdown {
-  max-width: calc(100% - 45px);
+  max-width: calc(100% - 22px);
+  margin-right: 4px;
+  text-overflow: ellipsis;
 }
 
 .vertical-separator {
@@ -95,4 +90,26 @@ color-legend {
   /* extra space to keep other controls from jumping when legend changes */
   /* width: 400px; */
   margin-right: 16px;
+}
+
+
+/* Pending request indicator */
+.loading-indicator-container {
+  position: relative;
+  width: 100%;
+  top: -2px;
+}
+
+@keyframes running-progress {
+  0% { margin-left: 0; margin-right: 100%; }
+  50% { margin-left: 35%; margin-right: 0%; }
+  100% { margin-left: 100%; margin-right: 0%; }
+}
+
+.loading-indicator {
+  position: absolute;
+  background-color: var(--lit-neutral-500);
+  width: 100%;
+  height: 2px;
+  animation: running-progress 2s cubic-bezier(0.4, 0, 0.2, 1) infinite;
 }
\ No newline at end of file
diff --git a/lit_nlp/client/modules/lm_salience_module.ts b/lit_nlp/client/modules/lm_salience_module.ts
index 1d064a70..14d69b9f 100644
--- a/lit_nlp/client/modules/lm_salience_module.ts
+++ b/lit_nlp/client/modules/lm_salience_module.ts
@@ -10,6 +10,7 @@ import '../elements/fused_button_bar';
 import {css, html} from 'lit';
 // tslint:disable:no-new-decorators
 import {customElement} from 'lit/decorators.js';
+import {classMap} from 'lit/directives/class-map.js';
 import {computed, observable} from 'mobx';
 
 import {LitModule} from '../core/lit_module';
@@ -556,17 +557,21 @@ export class LMSalienceModule extends SingleExampleSingleModelModule {
     `;
   }
 
+  /* Disabled for space reasons. */
+  // renderSelfScoreSelector() {
+  //   const onClickToggleSelfSalience = () => {
+  //     this.showSelfSalience = !this.showSelfSalience;
+  //   };
+  //   // prettier-ignore
+  //   return html`
+  //     <lit-switch labelLeft="Show self scores"
+  //       ?selected=${this.showSelfSalience}
+  //       @change=${onClickToggleSelfSalience}>
+  //     </lit-switch>
+  //   `;
+  // }
   renderSelfScoreSelector() {
-    const onClickToggleSelfSalience = () => {
-      this.showSelfSalience = !this.showSelfSalience;
-    };
-    // prettier-ignore
-    return html`
-      <lit-switch labelLeft="Show self scores"
-        ?selected=${this.showSelfSalience}
-        @change=${onClickToggleSelfSalience}>
-      </lit-switch>
-    `;
+    return null;
   }
 
   renderMethodSelector() {
@@ -632,14 +637,29 @@ export class LMSalienceModule extends SingleExampleSingleModelModule {
       `;
     });
 
+    const targetSelectorHelp =
+        'Select a (response) from the model or a pre-defined (target) sequence from the dataset.';
+
     // prettier-ignore
     return html`
-      </div>
+        <lit-tooltip content=${targetSelectorHelp}>
+          <span class="help-icon material-icon-outlined icon-button">
+            help_outline
+          </span>
+        </lit-tooltip>
+      </div>
     `;
   }
+
+  renderLoadingIndicator() {
+    // prettier-ignore
+    return html`
+      <div class='loading-indicator-container'>
+        <div class='loading-indicator'></div>
+      </div>
+    `;
+  }
@@ -658,12 +678,22 @@ export class LMSalienceModule extends SingleExampleSingleModelModule {
       return `Explaining ${this.printTargetForHuman(start, end)}`;
     };
 
+    const requestPending = this.targetTokenSpan !== undefined &&
+        this.salienceResultCache[this.spanToKey(this.targetTokenSpan)] ===
+            REQUEST_PENDING;
+    // const requestPending = true;
+    const infoLineClasses = classMap({
+      'target-info-line': true,
+      'gray-text': requestPending,
+    });
+
     // prettier-ignore
     return html`
-        <div class='target-info-line'>
+        <div class=${infoLineClasses}>
           ${printSelectedTargets()}
+          ${requestPending ? this.renderLoadingIndicator() : null}
         </div>
     `;
   }
@@ -741,12 +771,9 @@ export class LMSalienceModule extends SingleExampleSingleModelModule {
       });
     }
 
-    // TODO: revert to 4px for non-dense view if we can figure out the
-    // display mode for token chips? Needs more padding for block mode,
-    // but also indentation and newlines are wonky.
     // prettier-ignore
     return html`
-      <div class='chip-container-dense'>
+      <div class='chip-container'>
@@ -793,7 +820,7 @@ export class LMSalienceModule extends SingleExampleSingleModelModule {
-            restart_alt
diff --git a/lit_nlp/examples/models/instrumented_keras_lms.py b/lit_nlp/examples/models/instrumented_keras_lms.py
index 96a0af72..453487bd 100644
--- a/lit_nlp/examples/models/instrumented_keras_lms.py
+++ b/lit_nlp/examples/models/instrumented_keras_lms.py
@@ -307,7 +307,7 @@ def layer_intercept_fn(x, i):
         FieldNames.GRAD_NORM: grad_l2,
         FieldNames.GRAD_DOT_INPUT: grad_dot_input,
         # Shift token loss to align with (input) tokens.
-        FieldNames.TOKEN_LOSS: tf.roll(per_token_loss, shift=1, axis=1),
+        # FieldNames.TOKEN_LOSS: tf.roll(per_token_loss, shift=1, axis=1),
     }
 
     return batched_outputs
@@ -322,7 +322,7 @@ def _postprocess(self, preds):
     ):
       preds[key] = preds[key][mask]
     # First token (<s>) is not actually predicted, so return 0 for loss.
-    preds[FieldNames.TOKEN_LOSS][0] = 0
+    # preds[FieldNames.TOKEN_LOSS][0] = 0
 
     return preds
@@ -353,11 +353,11 @@ def input_spec(self):
   def output_spec(self) -> lit_types.Spec:
     return {
         FieldNames.TOKENS: lit_types.Tokens(parent=""),  # All tokens.
+        FieldNames.GRAD_NORM: lit_types.TokenScores(align=FieldNames.TOKENS),
         FieldNames.GRAD_DOT_INPUT: lit_types.TokenScores(
            align=FieldNames.TOKENS
        ),
-        FieldNames.GRAD_NORM: lit_types.TokenScores(align=FieldNames.TOKENS),
-        FieldNames.TOKEN_LOSS: lit_types.TokenScores(align=FieldNames.TOKENS),
+        # FieldNames.TOKEN_LOSS: lit_types.TokenScores(align=FieldNames.TOKENS),
     }
diff --git a/lit_nlp/examples/models/pretrained_lms.py b/lit_nlp/examples/models/pretrained_lms.py
index 9fdae45d..cbb9e89c 100644
--- a/lit_nlp/examples/models/pretrained_lms.py
+++ b/lit_nlp/examples/models/pretrained_lms.py
@@ -594,7 +594,7 @@ def _pred(self, encoded_inputs, target_masks):
         "grad_l2": grad_l2,
         "grad_dot_input": grad_dot_input,
         # Shift token loss to align with (input) tokens.
-        "token_loss": tf.roll(per_token_loss, shift=1, axis=1),
+        # "token_loss": tf.roll(per_token_loss, shift=1, axis=1),
     }
 
     return batched_outputs
@@ -609,7 +609,7 @@ def _postprocess(self, preds):
     for key in utils.find_spec_keys(self.output_spec(), lit_types.TokenScores):
       preds[key] = preds[key][mask]
     # First token (usually <s>) is not actually predicted, so return 0 for loss.
-    preds["token_loss"][0] = 0
+    # preds["token_loss"][0] = 0
 
     return preds
@@ -645,7 +645,7 @@ def output_spec(self) -> lit_types.Spec:
         "tokens": lit_types.Tokens(parent=""),  # all tokens
         "grad_l2": lit_types.TokenScores(align="tokens"),
         "grad_dot_input": lit_types.TokenScores(align="tokens"),
-        "token_loss": lit_types.TokenScores(align="tokens"),
+        # "token_loss": lit_types.TokenScores(align="tokens"),
     }
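
A note on the `token_loss` lines commented out above: the raw `per_token_loss` from these left-to-right LMs is indexed by the position doing the predicting, so `tf.roll(per_token_loss, shift=1, axis=1)` shifted each loss one slot to the right to sit under the token being predicted, and slot 0 (`<s>`, which is never predicted) was zeroed in `_postprocess`. A toy NumPy sketch of that alignment, with made-up loss values and `np.roll` standing in for `tf.roll`:

```python
import numpy as np

# Toy batch of one sequence. per_token_loss[0][i] is the loss of
# predicting tokens[i + 1] from the prefix tokens[: i + 1], so the raw
# vector is aligned with the *predicting* position, not the predicted one.
tokens = ["<s>", "the", "cat", "sat"]
per_token_loss = np.array([[2.3, 0.7, 1.1, 9.9]])  # last slot: nothing follows "sat"

# Mirrors tf.roll(per_token_loss, shift=1, axis=1): shift right by one so
# each loss sits under the token that was predicted.
token_loss = np.roll(per_token_loss, shift=1, axis=1)

# "<s>" is never predicted; its slot now holds the wrapped-around junk
# value, so zero it (the preds[...][0] = 0 step in _postprocess).
token_loss[0, 0] = 0.0

for tok, loss in zip(tokens, token_loss[0]):
  print(f"{tok}\t{loss:.1f}")
# <s>   0.0
# the   2.3
# cat   0.7
# sat   1.1
```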
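Relatedly, every `TokenScores(align=...)` field promises one score per entry of the `Tokens` field it aligns to, which is why `_postprocess` filters all such fields with the same mask used to de-pad the tokens. A minimal sketch of that invariant (toy values, not LIT code):

```python
import numpy as np

# Sketch of the masking step in _postprocess(): `mask` keeps the real
# positions and drops padding, and every TokenScores field is filtered
# with the same mask so each score vector stays the same length as the
# returned tokens list (the align= contract).
tokens = np.array(["<s>", "the", "cat", "<pad>", "<pad>"])
scores = {
    "grad_l2": np.array([0.0, 0.4, 0.9, 0.0, 0.0]),
    "grad_dot_input": np.array([0.0, -0.2, 0.5, 0.0, 0.0]),
}
mask = tokens != "<pad>"

tokens = tokens[mask]
for key in scores:  # cf. utils.find_spec_keys(self.output_spec(), TokenScores)
  scores[key] = scores[key][mask]

assert all(len(v) == len(tokens) for v in scores.values())
```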
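On the TypeScript side, the new loading indicator is driven by a sentinel in the salience cache rather than a separate boolean: while a request is in flight the cache slot holds `REQUEST_PENDING`, and the `.loading-indicator` CSS sweeps a 2px bar from the left edge to the right (the `running-progress` keyframes animate `margin-left`/`margin-right`) until the result lands. The same pattern, sketched in Python with illustrative names (`start_request` and `fake_fetch` are not LIT APIs):

```python
import asyncio

REQUEST_PENDING = object()  # sentinel, standing in for the client's REQUEST_PENDING
salience_cache = {}

def start_request(key, fetch):
  """Synchronously marks the slot pending, then fills it in the background."""
  salience_cache[key] = REQUEST_PENDING

  async def run():
    salience_cache[key] = await fetch(key)

  return asyncio.ensure_future(run())

def request_pending(key):
  """Mirrors the TS check: a target is selected and its slot holds the sentinel."""
  return key is not None and salience_cache.get(key) is REQUEST_PENDING

async def main():
  async def fake_fetch(key):  # stand-in for the real salience request
    return [0.1, 0.9]

  task = start_request("tokens 0-3", fake_fetch)
  print(request_pending("tokens 0-3"))  # True: the indicator would be shown
  await task
  print(request_pending("tokens 0-3"))  # False: the result replaced the sentinel

asyncio.run(main())
```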