|
4074 | 4074 | "from continuous_lora.layers.continuous_conv_lora_layer import ContinuousConvLoRALayer\n",
|
4075 | 4075 | "from continuous_lora.layers.old_conv_lora_layer import Conv2dLora\n",
|
4076 | 4076 | "\n",
|
| 4077 | + "# Define the input and output channels for different layers\n", |
| 4078 | + "in_channels = [3, 64, 64, 128, 128, 256, 256, 512]\n", |
| 4079 | + "out_channels = [64, 64, 128, 128, 256, 256, 512, 512]\n", |
4077 | 4080 | "\n",
|
4078 |
| - "\n", |
4079 |
| - "in_channels=[3,64,64,128,128,256,256,512]\n", |
4080 |
| - "out_channels=[64,64,128,128,256,256,512,512]\n", |
4081 |
| - "\n", |
4082 |
| - "for in_channel,out_channel in zip(in_channels,out_channels):\n", |
4083 |
| - " print(3*'\\n'+f'in_channel: {in_channel} | out_channel {out_channel}')\n", |
4084 |
| - " kernel_size=3\n", |
4085 |
| - " \n", |
4086 |
| - " r=25\n", |
4087 |
| - " \n", |
| 4081 | + "# Iterate through each pair of input and output channels\n", |
| 4082 | + "for in_channel, out_channel in zip(in_channels, out_channels):\n", |
| 4083 | + " print(3*'\\n' + f'in_channel: {in_channel} | out_channel {out_channel}')\n", |
| 4084 | + " kernel_size = 3\n", |
| 4085 | + " r = 25\n", |
4088 | 4086 | " \n",
|
| 4087 | + " # Standard Convolution Layer\n", |
4089 | 4088 | " normal_conv = nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=kernel_size)\n",
|
4090 | 4089 | " normal_conv_num_params = 0\n",
|
4091 | 4090 | " for name, p in normal_conv.named_parameters(): \n",
|
4092 | 4091 | " if p.requires_grad:\n",
|
4093 | 4092 | " normal_conv_num_params += p.numel()\n",
|
4094 |
| - " print(f'{name} é um parametro treinável. {p.numel()}. {p.shape}')\n", |
| 4093 | + " print(f'{name} is a trainable parameter. {p.numel()}. {p.shape}')\n", |
4095 | 4094 | " else:\n",
|
4096 |
| - " print(f'{name} NÃO é um parametro treinável')\n", |
| 4095 | + " print(f'{name} is NOT a trainable parameter')\n", |
4097 | 4096 | " \n",
|
4098 |
| - " print(f'Numero de parametros treinaveis na layer Conv normal: {normal_conv_num_params}')\n", |
| 4097 | + " print(f'Number of trainable parameters in the standard Conv layer: {normal_conv_num_params}')\n", |
4099 | 4098 | " \n",
|
4100 |
| - " print(3*'\\n'+40*'-'+'Continuous Conv Lora')\n", |
4101 |
| - " continuous_conv = ContinuousConvLoRALayer(in_channels=in_channel, out_channels=out_channel, kernel_size=kernel_size, number_of_tasks=1, conv_module=nn.Conv2d, r=r)\n", |
| 4099 | + " # Continuous Conv Lora Layer\n", |
| 4100 | + " print(3*'\\n' + 40*'-' + 'Continuous Conv Lora')\n", |
| 4101 | + " continuous_conv = ContinuousConvLoRALayer(\n", |
| 4102 | + " in_channels=in_channel, \n", |
| 4103 | + " out_channels=out_channel, \n", |
| 4104 | + " kernel_size=kernel_size, \n", |
| 4105 | + " number_of_tasks=1, \n", |
| 4106 | + " conv_module=nn.Conv2d, \n", |
| 4107 | + " r=r\n", |
| 4108 | + " )\n", |
4102 | 4109 | " continuous_conv_num_params = 0\n",
|
4103 | 4110 | " for name, p in continuous_conv.named_parameters(): \n",
|
4104 | 4111 | " if p.requires_grad:\n",
|
4105 | 4112 | " continuous_conv_num_params += p.numel()\n",
|
4106 |
| - " print(f'{name} é um parametro treinável. {p.numel()}. {p.shape}')\n", |
| 4113 | + " print(f'{name} is a trainable parameter. {p.numel()}. {p.shape}')\n", |
4107 | 4114 | " else:\n",
|
4108 |
| - " print(f'{name} NÃO é um parametro treinável')\n", |
| 4115 | + " print(f'{name} is NOT a trainable parameter')\n", |
4109 | 4116 | " \n",
|
4110 |
| - " print(f'Numero de parametros treinaveis ConvLora terceiro: {continuous_conv_num_params}')\n", |
| 4117 | + " print(f'Number of trainable parameters in ConvLora third-party: {continuous_conv_num_params}')\n", |
4111 | 4118 | " \n",
|
4112 |
| - " print(3*'\\n'+40*'-'+'Continuous Conv Lora NOSSO')\n", |
| 4119 | + " # Custom Continuous Conv Lora Layer\n", |
| 4120 | + " print(3*'\\n' + 40*'-' + 'Continuous Conv Lora OUR')\n", |
4113 | 4121 | " continuous_conv_our = Conv2dLora(in_channels=in_channel, out_channels=out_channel, kernel_size=kernel_size)\n",
|
4114 | 4122 | " continuous_conv_our.weight.requires_grad = False\n",
|
4115 | 4123 | " continuous_conv_our.bias.requires_grad = False\n",
|
4116 | 4124 | " continuous_conv_our_num_params = 0\n",
|
4117 | 4125 | " for name, p in continuous_conv_our.named_parameters(): \n",
|
4118 | 4126 | " if p.requires_grad:\n",
|
4119 | 4127 | " continuous_conv_our_num_params += p.numel()\n",
|
4120 |
| - " print(f'{name} é um parametro treinável. {p.numel()}. {p.shape}')\n", |
| 4128 | + " print(f'{name} is a trainable parameter. {p.numel()}. {p.shape}')\n", |
4121 | 4129 | " else:\n",
|
4122 |
| - " print(f'{name} NÃO é um parametro treinável')\n", |
| 4130 | + " print(f'{name} is NOT a trainable parameter')\n", |
4123 | 4131 | " \n",
|
4124 |
| - " print(f'Numero de parametros treinaveis no nosso ConvLora: {continuous_conv_our_num_params}')\n" |
| 4132 | + " print(f'Number of trainable parameters in our ConvLora: {continuous_conv_our_num_params}')\n" |
4125 | 4133 | ]
|
4126 | 4134 | },
|
4127 | 4135 | {
|
|
4147 | 4155 | "import sys\n",
|
4148 | 4156 | "import os\n",
|
4149 | 4157 | "\n",
|
4150 |
| - "# Adiciona o diretório pai ao caminho de pesquisa de módulos\n", |
4151 | 4158 | "sys.path.append(os.path.abspath(os.path.join('..')))\n",
|
4152 | 4159 | "\n",
|
4153 | 4160 | "from torch import nn\n",
|
|
4164 | 4171 | " adapt_last_n_conv=0,\n",
|
4165 | 4172 | " adapt_last_n_linear=0\n",
|
4166 | 4173 | ")\n",
|
4167 |
| - "print('APENAS CONTANDO OS PARAMETROS DAS LAYERS CONVOLUCIONAIS. PRECISEI ALTERAR O CODIGO PARA NÃO CONGELAR AS LAYERS\\n\\n')\n", |
4168 |
| - "print(f'Número de parametros treinaveis no features extractor: {model.count_features_trainable_params()}')\n", |
4169 |
| - "print(f'Número de parametros treinaveis no classifier: {model.count_classifier_trainable_params()}')\n", |
4170 |
| - "print(f'Número de parametros treinaveis total: {model.count_trainable_params()}')" |
| 4174 | + "print('ONLY COUNTING THE PARAMETERS OF FILTERS A, B (NEW IMPLEMENTATION) R=3. NEEDED TO MODIFY THE CODE TO USE OUR IMPLEMENTATION.\\n\\n')\n", |
| 4175 | + "print(f'Number of trainable parameters in the feature extractor: {model.count_features_trainable_params()}')\n", |
| 4176 | + "print(f'Number of trainable parameters in the classifier: {model.count_classifier_trainable_params()}')\n", |
| 4177 | + "print(f'Total number of trainable parameters: {model.count_trainable_params()}')" |
4171 | 4178 | ]
|
4172 | 4179 | },
|
4173 | 4180 | {
|
|
4193 | 4200 | "import sys\n",
|
4194 | 4201 | "import os\n",
|
4195 | 4202 | "\n",
|
4196 |
| - "# Adiciona o diretório pai ao caminho de pesquisa de módulos\n", |
4197 | 4203 | "sys.path.append(os.path.abspath(os.path.join('..')))\n",
|
4198 | 4204 | "\n",
|
4199 | 4205 | "from torch import nn\n",
|
|
4211 | 4217 | " adapt_last_n_linear=0\n",
|
4212 | 4218 | ")\n",
|
4213 | 4219 | "\n",
|
4214 |
| - "print('APENAS CONTANDO OS PARAMETROS DOS FILTROS A,B,C e D (nosso). PRECISEI ALTERAR O CODIGO PARA USAR NOSSA IMPL.\\n\\n')\n", |
4215 |
| - "print(f'Número de parametros treinaveis no features extractor: {model.count_features_trainable_params()}')\n", |
4216 |
| - "print(f'Número de parametros treinaveis no classifier: {model.count_classifier_trainable_params()}')\n", |
4217 |
| - "print(f'Número de parametros treinaveis total: {model.count_trainable_params()}')" |
| 4220 | + "print('ONLY COUNTING THE PARAMETERS OF FILTERS A, B (NEW IMPLEMENTATION) R=3. NEEDED TO MODIFY THE CODE TO USE OUR IMPLEMENTATION.\\n\\n')\n", |
| 4221 | + "print(f'Number of trainable parameters in the feature extractor: {model.count_features_trainable_params()}')\n", |
| 4222 | + "print(f'Number of trainable parameters in the classifier: {model.count_classifier_trainable_params()}')\n", |
| 4223 | + "print(f'Total number of trainable parameters: {model.count_trainable_params()}')" |
4218 | 4224 | ]
|
4219 | 4225 | },
|
4220 | 4226 | {
|
|
4240 | 4246 | "import sys\n",
|
4241 | 4247 | "import os\n",
|
4242 | 4248 | "\n",
|
4243 |
| - "# Adiciona o diretório pai ao caminho de pesquisa de módulos\n", |
4244 | 4249 | "sys.path.append(os.path.abspath(os.path.join('..')))\n",
|
4245 | 4250 | "\n",
|
4246 | 4251 | "from torch import nn\n",
|
|
4258 | 4263 | " adapt_last_n_linear=0\n",
|
4259 | 4264 | ")\n",
|
4260 | 4265 | "\n",
|
4261 |
| - "print('APENAS CONTANDO OS PARAMETROS DOS FILTROS A,B (NOVA IMPLEMENTACAO) R=1. PRECISEI ALTERAR O CODIGO PARA USAR NOSSA IMPL.\\n\\n')\n", |
4262 |
| - "print(f'Número de parametros treinaveis no features extractor: {model.count_features_trainable_params()}')\n", |
4263 |
| - "print(f'Número de parametros treinaveis no classifier: {model.count_classifier_trainable_params()}')\n", |
4264 |
| - "print(f'Número de parametros treinaveis total: {model.count_trainable_params()}')" |
| 4266 | + "print('ONLY COUNTING THE PARAMETERS OF FILTERS A, B (NEW IMPLEMENTATION) R=1. NEEDED TO MODIFY THE CODE TO USE OUR IMPLEMENTATION.\\n\\n')\n", |
| 4267 | + "print(f'Number of trainable parameters in the feature extractor: {model.count_features_trainable_params()}')\n", |
| 4268 | + "print(f'Number of trainable parameters in the classifier: {model.count_classifier_trainable_params()}')\n", |
| 4269 | + "print(f'Total number of trainable parameters: {model.count_trainable_params()}')\n" |
4265 | 4270 | ]
|
4266 | 4271 | },
|
4267 | 4272 | {
|
|
4287 | 4292 | "import sys\n",
|
4288 | 4293 | "import os\n",
|
4289 | 4294 | "\n",
|
4290 |
| - "# Adiciona o diretório pai ao caminho de pesquisa de módulos\n", |
4291 | 4295 | "sys.path.append(os.path.abspath(os.path.join('..')))\n",
|
4292 | 4296 | "\n",
|
4293 | 4297 | "from torch import nn\n",
|
|
4305 | 4309 | " adapt_last_n_linear=0\n",
|
4306 | 4310 | ")\n",
|
4307 | 4311 | "\n",
|
4308 |
| - "print('APENAS CONTANDO OS PARAMETROS DOS FILTROS A,B (NOVA IMPLEMENTACAO) R=3. PRECISEI ALTERAR O CODIGO PARA USAR NOSSA IMPL.\\n\\n')\n", |
4309 |
| - "print(f'Número de parametros treinaveis no features extractor: {model.count_features_trainable_params()}')\n", |
4310 |
| - "print(f'Número de parametros treinaveis no classifier: {model.count_classifier_trainable_params()}')\n", |
4311 |
| - "print(f'Número de parametros treinaveis total: {model.count_trainable_params()}')" |
| 4312 | + "print('ONLY COUNTING THE PARAMETERS OF FILTERS A, B (NEW IMPLEMENTATION) R=3. NEEDED TO MODIFY THE CODE TO USE OUR IMPLEMENTATION.\\n\\n')\n", |
| 4313 | + "print(f'Number of trainable parameters in the feature extractor: {model.count_features_trainable_params()}')\n", |
| 4314 | + "print(f'Number of trainable parameters in the classifier: {model.count_classifier_trainable_params()}')\n", |
| 4315 | + "print(f'Total number of trainable parameters: {model.count_trainable_params()}')" |
4312 | 4316 | ]
|
4313 | 4317 | },
|
4314 | 4318 | {
|
|
4334 | 4338 | "import sys\n",
|
4335 | 4339 | "import os\n",
|
4336 | 4340 | "\n",
|
4337 |
| - "# Adiciona o diretório pai ao caminho de pesquisa de módulos\n", |
4338 | 4341 | "sys.path.append(os.path.abspath(os.path.join('..')))\n",
|
4339 | 4342 | "\n",
|
4340 | 4343 | "from torch import nn\n",
|
|
4352 | 4355 | " adapt_last_n_linear=0\n",
|
4353 | 4356 | ")\n",
|
4354 | 4357 | "\n",
|
4355 |
| - "print('APENAS CONTANDO OS PARAMETROS DOS FILTROS A,B (NOVA IMPLEMENTACAO) R=24. PRECISEI ALTERAR O CODIGO PARA USAR NOSSA IMPL.\\n\\n')\n", |
4356 |
| - "print(f'Número de parametros treinaveis no features extractor: {model.count_features_trainable_params()}')\n", |
4357 |
| - "print(f'Número de parametros treinaveis no classifier: {model.count_classifier_trainable_params()}')\n", |
4358 |
| - "print(f'Número de parametros treinaveis total: {model.count_trainable_params()}')" |
| 4358 | + "print('ONLY COUNTING THE PARAMETERS OF FILTERS A, B (NEW IMPLEMENTATION) R=3. NEEDED TO MODIFY THE CODE TO USE OUR IMPLEMENTATION.\\n\\n')\n", |
| 4359 | + "print(f'Number of trainable parameters in the feature extractor: {model.count_features_trainable_params()}')\n", |
| 4360 | + "print(f'Number of trainable parameters in the classifier: {model.count_classifier_trainable_params()}')\n", |
| 4361 | + "print(f'Total number of trainable parameters: {model.count_trainable_params()}')" |
4359 | 4362 | ]
|
4360 | 4363 | },
|
4361 | 4364 | {
|
|
4381 | 4384 | "import sys\n",
|
4382 | 4385 | "import os\n",
|
4383 | 4386 | "\n",
|
4384 |
| - "# Adiciona o diretório pai ao caminho de pesquisa de módulos\n", |
4385 | 4387 | "sys.path.append(os.path.abspath(os.path.join('..')))\n",
|
4386 | 4388 | "\n",
|
4387 | 4389 | "from torch import nn\n",
|
|
4399 | 4401 | " adapt_last_n_linear=0\n",
|
4400 | 4402 | ")\n",
|
4401 | 4403 | "\n",
|
4402 |
| - "print('APENAS CONTANDO OS PARAMETROS DOS FILTROS A,B (NOVA IMPLEMENTACAO) R=12. PRECISEI ALTERAR O CODIGO PARA USAR NOSSA IMPL.\\n\\n')\n", |
4403 |
| - "print(f'Número de parametros treinaveis no features extractor: {model.count_features_trainable_params()}')\n", |
4404 |
| - "print(f'Número de parametros treinaveis no classifier: {model.count_classifier_trainable_params()}')\n", |
4405 |
| - "print(f'Número de parametros treinaveis total: {model.count_trainable_params()}')" |
| 4404 | + "print('ONLY COUNTING THE PARAMETERS OF FILTERS A, B (NEW IMPLEMENTATION) R=3. NEEDED TO MODIFY THE CODE TO USE OUR IMPLEMENTATION.\\n\\n')\n", |
| 4405 | + "print(f'Number of trainable parameters in the feature extractor: {model.count_features_trainable_params()}')\n", |
| 4406 | + "print(f'Number of trainable parameters in the classifier: {model.count_classifier_trainable_params()}')\n", |
| 4407 | + "print(f'Total number of trainable parameters: {model.count_trainable_params()}')" |
4406 | 4408 | ]
|
4407 | 4409 | },
|
4408 | 4410 | {
|
|