Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Fix initial assumptions as list of pipelines #1070

Merged
merged 5 commits into from
Mar 22, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
19 changes: 12 additions & 7 deletions fedot/api/api_utils/api_composer.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ def obtain_model(self, train_data: InputData) -> Tuple[Pipeline, Sequence[Pipeli

self.timer = ApiTime(time_for_automl=timeout, with_tuning=with_tuning)

fitted_assumption = self.propose_and_fit_initial_assumption(train_data)
initial_assumption, fitted_assumption = self.propose_and_fit_initial_assumption(train_data)

multi_objective = len(self.metrics.metric_functions) > 1
self.params.init_params_for_composing(self.timer.timedelta_composing, multi_objective)
Expand All @@ -66,7 +66,11 @@ def obtain_model(self, train_data: InputData) -> Tuple[Pipeline, Sequence[Pipeli
f" Time limit: {timeout} min."
f" Set of candidate models: {self.params.get('available_operations')}.")

best_pipeline, best_pipeline_candidates, gp_composer = self.compose_pipeline(train_data, fitted_assumption)
best_pipeline, best_pipeline_candidates, gp_composer = self.compose_pipeline(
train_data,
initial_assumption,
fitted_assumption
)
if with_tuning:
best_pipeline = self.tune_final_pipeline(train_data, best_pipeline)
if gp_composer.history:
Expand All @@ -78,9 +82,10 @@ def obtain_model(self, train_data: InputData) -> Tuple[Pipeline, Sequence[Pipeli
self.log.message('Model generation finished')
return best_pipeline, best_pipeline_candidates, gp_composer.history

def propose_and_fit_initial_assumption(self, train_data: InputData) -> Pipeline:
def propose_and_fit_initial_assumption(self, train_data: InputData) -> Tuple[Sequence[Pipeline], Pipeline]:
""" Method for obtaining and fitting initial assumption"""
available_operations = self.params.get('available_operations')

preset = self.params.get('preset')

assumption_handler = AssumptionsHandler(train_data)
Expand All @@ -102,14 +107,14 @@ def propose_and_fit_initial_assumption(self, train_data: InputData) -> Pipeline:

self.params.update(preset=assumption_handler.propose_preset(preset, self.timer, n_jobs=self.params.n_jobs))

return fitted_assumption
return initial_assumption, fitted_assumption
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Возможно есть смысл в fit_assumption_and_check_correctness фитить все пайплайны из initial_assumption?

Тогда не нужно будет делать разделение на initial_assumption и fitted_assumption. И в compose_pipeline, если не хватает времени, возвращать в качестве best_pipeline_candidates initial_assumption целиком. Ну и как будто если мы действительно весь initial_assumption передаем в оптимизатор, то нужно его весь проверить.

Только надо будет оценку времени на assumption изменить, возможно, взять как среднее время фита для пайплайна.

Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Тут смысл в том, что fit_assumption_and_check_correctness обучает на полных данных, без CV. Поэтому дальше эти результаты можно использовать только если итоговый пайплайн совпадает с ним по структуре.

Так что особо смысла обучать все нет.

Copy link
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

@YamLyubov, значит, можем вливать?

Copy link
Collaborator Author

@MorrisNein MorrisNein Mar 21, 2023

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Ну и как будто если мы действительно весь initial_assumption передаем в оптимизатор, то нужно его весь проверить.

Насколько я понимаю, оптимизатор не сломается, если часть индивидов будут "поломанные", — тогда они просто тут же "вымрут". Верно?

Для единственного начального приближения более критично быть работоспособным.

Конечно, ещё можем тут в цикле проверять начальные приближения до первого успешного. В редких случаях это отнимет много времени у эволюции, но это не должно возникать часто. Зато, если пользователь предоставил несколько начальных приближений, часть из которых нерабочие, Федот не сломается.


def compose_pipeline(self, train_data: InputData, fitted_assumption: Pipeline) \
-> Tuple[Pipeline, List[Pipeline], GPComposer]:
def compose_pipeline(self, train_data: InputData, initial_assumption: Sequence[Pipeline],
fitted_assumption: Pipeline) -> Tuple[Pipeline, List[Pipeline], GPComposer]:

gp_composer: GPComposer = (ComposerBuilder(task=self.params.task)
.with_requirements(self.params.composer_requirements)
.with_initial_pipelines(fitted_assumption)
.with_initial_pipelines(initial_assumption)
.with_optimizer(self.params.get('optimizer'))
.with_optimizer_params(parameters=self.params.optimizer_params,
external_parameters=self.params.get('optimizer_external_params'))
Expand Down
8 changes: 3 additions & 5 deletions test/unit/api/test_api_params.py
Original file line number Diff line number Diff line change
Expand Up @@ -79,11 +79,9 @@ def test_correctly_sets_default_params(input_params):
assert output_params[k] == input_params[k]


@pytest.mark.parametrize('input_params, case, correct_keys',
[(fedot_params_full, 'composer', correct_composer_attributes),
(params_with_missings, 'composer', correct_composer_attributes),
(fedot_params_full, 'gp_algo', correct_gp_algorithm_attributes),
(params_with_missings, 'gp_algo', correct_gp_algorithm_attributes)])
@pytest.mark.parametrize('input_params', [fedot_params_full, params_with_missings])
@pytest.mark.parametrize('case, correct_keys', [('composer', correct_composer_attributes),
('gp_algo', correct_gp_algorithm_attributes)])
def test_filter_params_correctly(input_params, case, correct_keys):
params_repository = get_api_params_repository()
input_params = params_repository.check_and_set_default_params(input_params)
Expand Down
20 changes: 8 additions & 12 deletions test/unit/api/test_api_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,8 @@

import pytest

from examples.simple.classification.classification_pipelines import classification_pipeline_without_balancing
from examples.simple.classification.classification_pipelines import (classification_pipeline_with_balancing,
classification_pipeline_without_balancing)
from fedot.api.api_utils.assumptions.assumptions_builder import AssumptionsBuilder
from fedot.api.main import Fedot
from fedot.core.data.data_split import train_test_data_setup
Expand Down Expand Up @@ -47,23 +48,18 @@ def test_output_binary_classification_correct():
def test_predefined_initial_assumption():
""" Check if predefined initial assumption and other api params don't lose while preprocessing is performing"""
train_input, _, _ = get_dataset(task_type='classification')
initial_pipeline = classification_pipeline_without_balancing()
initial_pipelines = [classification_pipeline_without_balancing(), classification_pipeline_with_balancing()]
available_operations = ['bernb', 'dt', 'knn', 'lda', 'qda', 'logit', 'rf', 'svc',
'scaling', 'normalization', 'pca', 'kernel_pca']

model = Fedot(problem='classification', timeout=1.,
model = Fedot(problem='classification', timeout=.1,
logging_level=logging.DEBUG, available_operations=available_operations,
initial_assumption=initial_pipeline)
model.target = train_input.target
model.train_data = model.data_processor.define_data(features=train_input.features,
target=train_input.target,
is_predict=False)
initial_assumption=initial_pipelines)
old_params = deepcopy(model.params)
recs_for_data, _ = model.data_analyser.give_recommendations(model.train_data)
model.data_processor.accept_and_apply_recommendations(model.train_data, recs_for_data)
model.params.accept_and_apply_recommendations(model.train_data, recs_for_data)
model.fit(train_input)

assert model.params.get('initial_assumption') is not None
assert len(initial_pipelines) == len(model.params.get('initial_assumption'))
assert len(model.params.get('initial_assumption')) == len(model.history.initial_assumptions)
assert len(old_params) == len(model.params)


Expand Down
13 changes: 6 additions & 7 deletions test/unit/composer/test_history.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,14 @@
import os
from functools import partial
from itertools import chain
from pathlib import Path

import numpy as np
import pytest
from golem.core.dag.graph import Graph
from golem.core.optimisers.fitness import SingleObjFitness
from golem.core.optimisers.genetic.evaluation import MultiprocessingDispatcher
from golem.core.optimisers.opt_history_objects.individual import Individual
from golem.core.optimisers.opt_history_objects.opt_history import OptHistory

from fedot.api.main import Fedot
from fedot.core.data.data import InputData
Expand All @@ -18,11 +22,6 @@
from fedot.core.repository.quality_metrics_repository import ClassificationMetricsEnum, \
RegressionMetricsEnum, MetricType
from fedot.core.utils import fedot_project_root
from golem.core.dag.graph import Graph
from golem.core.optimisers.fitness import SingleObjFitness
from golem.core.optimisers.genetic.evaluation import MultiprocessingDispatcher
from golem.core.optimisers.opt_history_objects.individual import Individual
from golem.core.optimisers.opt_history_objects.opt_history import OptHistory
from test.unit.tasks.test_forecasting import get_ts_data
from test.unit.validation.test_table_cv import get_classification_data

Expand Down Expand Up @@ -86,7 +85,7 @@ def test_newly_generated_history(n_jobs: int):
assert history is not None
assert len(history.individuals) == num_of_gens + 2 # initial_assumptions + num_of_gens + final_choices
assert len(history.archive_history) == num_of_gens + 2 # initial_assumptions + num_of_gens + final_choices
assert len(history.initial_assumptions) == 1
assert len(history.initial_assumptions) >= 2
assert len(history.final_choices) == 1
assert isinstance(history.tuning_result, Graph)
_test_individuals_in_history(history)
Expand Down