diff --git a/.github/workflows/test_examples.yml b/.github/workflows/test_examples.yml index 659f0549..d6fdf9fd 100644 --- a/.github/workflows/test_examples.yml +++ b/.github/workflows/test_examples.yml @@ -38,7 +38,6 @@ jobs: run: | cd examples python tab_transformer.py --epochs 1 - python tabnet.py --epochs 1 python ft_transformer.py --epochs 1 python tab_transformer.py --epochs 1 python rect.py @@ -49,4 +48,3 @@ jobs: python bridge/bridge_tacm12k.py --epochs 1 python bridge/bridge_tlf2k.py --epochs 1 python bridge/bridge_tml1m.py --epochs 1 - diff --git a/examples/bridge/bridge_tacm12k.py b/examples/bridge/bridge_tacm12k.py index 7b914c3d..18f4f392 100644 --- a/examples/bridge/bridge_tacm12k.py +++ b/examples/bridge/bridge_tacm12k.py @@ -114,19 +114,12 @@ def test_epoch(): t_encoder = TableEncoder( hidden_dim=graph.x.size(1), stats_dict=paper_table.stats_dict, - table_transorm=FTTransformerTransform, - table_conv=TabTransformerConv, - conv_params={ - "attn_dropout": 0.3, - "ff_dropout": 0.3, - }, ) g_encoder = GraphEncoder( in_dim=graph.x.size(1), hidden_dim=128, out_dim=output_dim, dropout=args.gcn_dropout, - graph_conv=GCNConv, ) model = Bridge( table_encoder=t_encoder, diff --git a/examples/bridge/bridge_tlf2k.py b/examples/bridge/bridge_tlf2k.py index 969b558d..51fd7dd7 100644 --- a/examples/bridge/bridge_tlf2k.py +++ b/examples/bridge/bridge_tlf2k.py @@ -120,19 +120,12 @@ def test_epoch(): t_encoder = TableEncoder( hidden_dim=graph.x.size(1), stats_dict=artist_table.stats_dict, - table_transorm=FTTransformerTransform, - table_conv=TabTransformerConv, - conv_params={ - "attn_dropout": 0.3, - "ff_dropout": 0.3, - }, ) g_encoder = GraphEncoder( in_dim=graph.x.size(1), hidden_dim=128, out_dim=output_dim, dropout=args.gcn_dropout, - graph_conv=GCNConv, ) model = Bridge( table_encoder=t_encoder, diff --git a/examples/bridge/bridge_tml1m.py 
b/examples/bridge/bridge_tml1m.py index ee238b06..92d76eea 100644 --- a/examples/bridge/bridge_tml1m.py +++ b/examples/bridge/bridge_tml1m.py @@ -124,19 +124,12 @@ def test_epoch(): t_encoder = TableEncoder( hidden_dim=graph.x.size(1), stats_dict=user_table.stats_dict, - table_transorm=FTTransformerTransform, - table_conv=TabTransformerConv, - conv_params={ - "attn_dropout": 0.3, - "ff_dropout": 0.3, - }, ) g_encoder = GraphEncoder( in_dim=graph.x.size(1), hidden_dim=128, out_dim=output_dim, dropout=args.gcn_dropout, - graph_conv=GCNConv, ) model = Bridge( table_encoder=t_encoder, diff --git a/examples/bridge/utils.py b/examples/bridge/utils.py index 2b0e248a..2fa2baae 100644 --- a/examples/bridge/utils.py +++ b/examples/bridge/utils.py @@ -8,7 +8,6 @@ from rllm.nn.conv.graph_conv.gcn_conv import GCNConv from rllm.transforms.table_transforms import FTTransformerTransform from rllm.nn.conv.table_conv import TabTransformerConv -from rllm.types import ColType def get_homo_data( @@ -119,43 +118,17 @@ class GraphEncoder(torch.nn.Module): **kwargs: Parameters required for different convolution layers. 
""" - def __init__( - self, - in_dim, - hidden_dim, - out_dim, - dropout, - graph_conv: GCNConv, - num_layers: int = 2, - activate: str = "relu", - **kwargs, - ) -> None: + def __init__(self, in_dim, hidden_dim, out_dim, dropout): super().__init__() self.dropout = dropout - self.activate = activate - self.num_layers = num_layers - self.convs = torch.nn.ModuleList() - conv_args = kwargs["conv_params"] if "conv_params" in kwargs.keys() else None - if num_layers >= 2: - # First layer - self.convs.append(graph_conv(in_dim, hidden_dim, conv_args)) - - # Intermediate layer - for _ in range(num_layers - 2): - self.convs.append(graph_conv(hidden_dim, hidden_dim, conv_args)) - - # Last layer - self.convs.append(graph_conv(hidden_dim, out_dim, conv_args)) - else: - # Only layer - self.convs.append(graph_conv(in_dim, out_dim, conv_args)) + self.conv1 = GCNConv(in_dim, hidden_dim) + self.conv2 = GCNConv(hidden_dim, out_dim) def forward(self, x, adj): - for layer in range(self.num_layers - 1): - x = F.dropout(x, p=self.dropout, training=self.training) - x = F.relu(self.convs[layer](x, adj)) x = F.dropout(x, p=self.dropout, training=self.training) - x = self.convs[-1](x, adj) + x = F.relu(self.conv1(x, adj)) + x = F.dropout(x, p=self.dropout, training=self.training) + x = self.conv2(x, adj) return x @@ -176,29 +149,20 @@ class TableEncoder(torch.nn.Module): def __init__( self, hidden_dim, - stats_dict: Dict[ColType, List[Dict[str, Any]]], - table_transorm: FTTransformerTransform, - table_conv: TabTransformerConv, - num_layers: int = 1, - **kwargs, + stats_dict, ) -> None: super().__init__() - self.num_layers = num_layers - - self.table_transform = table_transorm( + self.table_transform = FTTransformerTransform( out_dim=hidden_dim, col_stats_dict=stats_dict, ) - - conv_args = kwargs["conv_params"] if "conv_params" in kwargs.keys() else None - self.convs = torch.nn.ModuleList() - for _ in range(self.num_layers): - self.convs.append(table_conv(dim=hidden_dim, **conv_args)) + 
self.conv = TabTransformerConv( + dim=hidden_dim, + ) def forward(self, table): feat_dict = table.get_feat_dict() # A dict contains feature tensor. x, _ = self.table_transform(feat_dict) - for table_conv in self.convs: - x = table_conv(x) + x = self.conv(x) x = x.mean(dim=1) return x