@@ -348,7 +348,7 @@ def forward(self, x) -> Dict[str, torch.Tensor]:
         input dimensions: (n_samples, n_time_steps, n_variables)
         """
 
-        dim_samples, dim_time, dim_variable, dim_loss = 0, 1, 2, 3
+        dim_samples, dim_time, dim_variable = 0, 1, 2
         past_target, past_covariates, historic_future_covariates, future_covariates = x
 
         batch_size = past_target.shape[dim_samples]
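A note on this first hunk: the `dim_loss` constant is removed, leaving only the three axes named in the docstring. A minimal sketch (not part of the PR; tensor names beyond those in the diff and all sizes are invented) of how those constants index the `(n_samples, n_time_steps, n_variables)` layout:

```python
import torch

dim_samples, dim_time, dim_variable = 0, 1, 2

# hypothetical batch: 32 samples, 24 past time steps, per-series variable counts
past_target = torch.randn(32, 24, 1)
past_covariates = torch.randn(32, 24, 3)
historic_future_covariates = torch.randn(32, 24, 2)
future_covariates = torch.randn(32, 12, 2)

# the forward() input is a tuple in this order
x = (past_target, past_covariates, historic_future_covariates, future_covariates)
past_target, past_covariates, historic_future_covariates, future_covariates = x

batch_size = past_target.shape[dim_samples]     # -> 32
n_time_steps = past_target.shape[dim_time]      # -> 24
n_variables = past_target.shape[dim_variable]   # -> 1
```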
@@ -450,12 +450,13 @@ def forward(self, x) -> Dict[str, torch.Tensor]:
             device=past_target.device,
         )
 
-        # this is only to interpret the output
-        static_covariate_var = torch.zeros(
-            (past_target.shape[0], 0),
-            dtype=past_target.dtype,
-            device=past_target.device,
-        )
+        # # TODO: implement below when static covariates are supported
+        # # this is only to interpret the output
+        # static_covariate_var = torch.zeros(
+        #     (past_target.shape[0], 0),
+        #     dtype=past_target.dtype,
+        #     device=past_target.device,
+        # )
 
         if future_covariates is None and static_covariates is None:
             raise NotImplementedError("make zero tensor if future covariates is None")
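The block commented out in the second hunk used a zero-width tensor as a placeholder when no static covariates exist, so output-interpretation code can concatenate it along the variable axis without special-casing. A rough sketch of that pattern (names and sizes here are illustrative, not the library's API):

```python
import torch

past_target = torch.randn(32, 24, 1)  # hypothetical batch
static_covariates = None              # not yet supported in this PR

if static_covariates is None:
    # zero-width placeholder: shape (batch, 0), matching dtype/device of
    # the inputs so it can be concatenated with other variable tensors
    static_covariate_var = torch.zeros(
        (past_target.shape[0], 0),
        dtype=past_target.dtype,
        device=past_target.device,
    )

# concatenating the placeholder along the variable axis is a no-op
other_vars = torch.randn(32, 5)
combined = torch.cat([other_vars, static_covariate_var], dim=1)  # still (32, 5)
```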