Skip to content

Commit

Permalink
Multimodal pipeline improvement (#581)
Browse files Browse the repository at this point in the history
* Multimodal pipeline improvement

- fixed the optimizer error in multimodal pipeline
- fixed the bug #564 'Example multi_modal_pipeline_genres failed'
- deleted the example of rating prediction
- optimized the process of NLP libraries import
- changed the data for multimodal example
- upgraded stemmer from Porter to Snowball
- fixed bug of merging multimodal data
- fixed bug of multimodal data shuffling while loading
- CNN now works on multi-output tasks
- multimodal data is now prepared as a unified MultiModalData object
- removed warning during scaling image data
- test_multi_modal.py is changed in accordance with the new structure of multi_modal_pipeline.py
- added pipeline tuning to multi_modal_genre_prediction.py
- keras.Input changed to recommended keras.layers.InputLayer
- test_multi_modal.py is moved to multimodal folder
  • Loading branch information
andreygetmanov authored Apr 7, 2022
1 parent 3fa9b2f commit 92aede4
Show file tree
Hide file tree
Showing 13 changed files with 184 additions and 306 deletions.
42 changes: 29 additions & 13 deletions cases/multi_modal_genre_prediction.py
Original file line number Diff line number Diff line change
@@ -1,25 +1,27 @@
import datetime
from examples.multi_modal_pipeline_genres import calculate_validation_metric, \

from sklearn.metrics import f1_score

from examples.advanced.multi_modal_pipeline import calculate_validation_metric, \
generate_initial_pipeline_and_data, prepare_multi_modal_data
from fedot.core.composer.composer_builder import ComposerBuilder
from fedot.core.composer.gp_composer.gp_composer import PipelineComposerRequirements
from fedot.core.log import default_log
from fedot.core.optimisers.gp_comp.gp_optimiser import GPGraphOptimiserParameters, GeneticSchemeTypesEnum
from fedot.core.repository.operation_types_repository import get_operations_for_task
from fedot.core.repository.quality_metrics_repository import ClassificationMetricsEnum
from fedot.core.pipelines.tuning.unified import PipelineTuner
from fedot.core.repository.tasks import Task, TaskTypesEnum


def run_multi_modal_case(files_path, is_visualise=True, timeout=datetime.timedelta(minutes=2)):
def run_multi_modal_case(files_path, is_visualise=True, timeout=datetime.timedelta(minutes=1)):
task = Task(TaskTypesEnum.classification)
images_size = (128, 128)

train_num, test_num, train_text, test_text = prepare_multi_modal_data(files_path, task,
images_size)
data = prepare_multi_modal_data(files_path, task, images_size)

pipeline, fit_data, predict_data = generate_initial_pipeline_and_data(images_size,
train_num, test_num,
train_text, test_text)
initial_pipeline, fit_data, predict_data = generate_initial_pipeline_and_data(images_size, data,
with_split=True)

# the search of the models provided by the framework that can be used as nodes in a pipeline for the selected task
available_model_types = get_operations_for_task(task=task, mode='model')
Expand All @@ -30,7 +32,7 @@ def run_multi_modal_case(files_path, is_visualise=True, timeout=datetime.timedel
composer_requirements = PipelineComposerRequirements(
primary=available_model_types,
secondary=available_model_types, max_arity=3,
max_depth=3, pop_size=5, num_of_generations=5,
max_depth=5, pop_size=5, num_of_generations=5,
crossover_prob=0.8, mutation_prob=0.8, timeout=timeout)

# GP optimiser parameters choice
Expand All @@ -43,15 +45,29 @@ def run_multi_modal_case(files_path, is_visualise=True, timeout=datetime.timedel
# the multi modal template (with data sources) is passed as initial assumption for composer
builder = ComposerBuilder(task=task).with_requirements(composer_requirements). \
with_metrics(metric_function).with_optimiser(parameters=optimiser_parameters).with_logger(logger=logger). \
with_initial_pipelines(pipeline).with_cache('multi_modal_opt.cache')
with_initial_pipelines([initial_pipeline]).with_cache('multi_modal_opt.cache')

# Create GP-based composer
composer = builder.build()

# the optimal pipeline generation by composition - the most time-consuming task
pipeline_evo_composed = composer.compose_pipeline(data=fit_data,
is_visualise=True)
pipeline_evo_composed.print_structure()

pipeline.fit(input_data=fit_data)
# tuning of the composed pipeline
pipeline_tuner = PipelineTuner(pipeline=pipeline_evo_composed, task=task, iterations=15)
tuned_pipeline = pipeline_tuner.tune_pipeline(input_data=fit_data,
loss_function=f1_score,
loss_params={'average': 'micro'})
tuned_pipeline.print_structure()
tuned_pipeline.fit(input_data=fit_data)

if is_visualise:
pipeline.show()
tuned_pipeline.show()

prediction = pipeline.predict(predict_data, output_mode='labels')
err = calculate_validation_metric(test_text, prediction)
prediction = tuned_pipeline.predict(predict_data, output_mode='labels')
err = calculate_validation_metric(predict_data, prediction)

print(f'F1 micro for validation sample is {err}')
return err
Expand Down
78 changes: 0 additions & 78 deletions cases/multi_modal_rating_prediction.py

This file was deleted.

150 changes: 87 additions & 63 deletions examples/advanced/multi_modal_pipeline.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import os
from typing import Union

import numpy as np
from sklearn.metrics import roc_auc_score as roc_auc
from sklearn.metrics import f1_score as f1

from cases.dataset_preparation import unpack_archived_data
from fedot.core.pipelines.pipeline import Pipeline
Expand All @@ -14,112 +14,136 @@
from fedot.core.utils import fedot_project_root


def calculate_validation_metric(pred: OutputData, valid: InputData) -> float:
predicted = np.ravel(pred.predict)
real = np.ravel(valid.target)
def calculate_validation_metric(valid: Union[InputData, MultiModalData], pred: OutputData) -> float:
"""
Calculates F1 score for predicted data
err = roc_auc(y_true=real,
y_score=predicted)
:param valid: dataclass with true target
:param pred: dataclass with model's prediction
"""

return round(err, 2)
real = valid.target
predicted = pred.predict

err = f1(y_true=real,
y_pred=predicted, average='micro')

def prepare_multi_modal_data(files_path, task: Task, images_size=(128, 128), with_split=True):
path = os.path.join(str(fedot_project_root()), files_path)
return round(err, 2)

unpack_archived_data(path)

data = InputData.from_json_files(path, fields_to_use=['votes', 'year'],
label='rating', task=task)
def prepare_multi_modal_data(files_path: str, task: Task, images_size: tuple = (128, 128)) -> MultiModalData:
"""
Imports data from 3 different sources (table, images and text)
class_labels = np.asarray([0 if t <= 7 else 1 for t in data.target])
data.target = class_labels
:param files_path: path to data
:param task: task to solve
:param images_size: the requested size in pixels, as a 2-tuple of (width, height)
:return: MultiModalData object which contains table, text and image data
"""

ratio = 0.5
path = os.path.join(str(fedot_project_root()), files_path)
# unpacking of data archive
unpack_archived_data(path)
# import of table data
data_num = InputData.from_json_files(path, fields_to_use=['votes', 'rating'],
label='genres', task=task, is_multilabel=True, shuffle=False)

class_labels = data_num.target

img_files_path = f'{files_path}/*.jpeg'
img_path = os.path.join(str(fedot_project_root()), img_files_path)

# import of image data
data_img = InputData.from_image(images=img_path, labels=class_labels, task=task, target_size=images_size)

# import of text data
data_text = InputData.from_json_files(path, fields_to_use=['plot'],
label='rating', task=task,
data_type=DataTypesEnum.text)
data_text.target = class_labels
label='genres', task=task,
data_type=DataTypesEnum.text, is_multilabel=True, shuffle=False)

if with_split:
train_num, test_num = train_test_data_setup(data, shuffle_flag=False, split_ratio=ratio)
train_img, test_img = train_test_data_setup(data_img, shuffle_flag=False, split_ratio=ratio)
train_text, test_text = train_test_data_setup(data_text, shuffle_flag=False, split_ratio=ratio)
else:
train_num, test_num = data, data
train_img, test_img = data_img, data_img
train_text, test_text = data_text, data_text
data = MultiModalData({
'data_source_img': data_img,
'data_source_table': data_num,
'data_source_text': data_text
})

return data

return train_num, test_num, train_img, test_img, train_text, test_text

def generate_initial_pipeline_and_data(images_size: tuple,
data: Union[InputData, MultiModalData],
with_split=True) -> tuple:
"""
Generates initial pipeline for data from 3 different sources (table, images and text)
Each source is the primary node for its subpipeline
def generate_initial_pipeline_and_data(images_size,
train_num, test_num,
train_img, test_img,
train_text, test_text):
:param images_size: the requested size in pixels, as a 2-tuple of (width, height)
:param data: multimodal data (from 3 different sources: table, text, image)
:param with_split: if True, splits the sample on train/test
:return: pipeline object, 2 multimodal data objects (fit and predict)
"""

# Identifying a number of classes for CNN params
if data.target.shape[1] > 1:
num_classes = data.target.shape[1]
else:
num_classes = data.num_classes
# image
ds_image = PrimaryNode('data_source_img/1')
ds_image = PrimaryNode('data_source_img')
image_node = SecondaryNode('cnn', nodes_from=[ds_image])
image_node.custom_params = {'image_shape': (images_size[0], images_size[1], 1),
'architecture': 'simplified',
'num_classes': 2,
'epochs': 15,
'batch_size': 128}
'architecture_type': 'simplified',
'num_classes': num_classes,
'epochs': 10,
'batch_size': 16,
'optimizer_parameters': {'loss': "binary_crossentropy",
'optimizer': "adam",
'metrics': 'categorical_crossentropy'}
}

# table
ds_table = PrimaryNode('data_source_table/2')
scaling_node = SecondaryNode('scaling', nodes_from=[ds_table])
numeric_node = SecondaryNode('rf', nodes_from=[scaling_node])
ds_table = PrimaryNode('data_source_table')
numeric_node = SecondaryNode('scaling', nodes_from=[ds_table])

# text
ds_text = PrimaryNode('data_source_text/3')
ds_text = PrimaryNode('data_source_text')
node_text_clean = SecondaryNode('text_clean', nodes_from=[ds_text])
text_node = SecondaryNode('tfidf', nodes_from=[node_text_clean])
text_node.custom_params = {'ngram_range': (1, 3), 'min_df': 0.001, 'max_df': 0.9}

pipeline = Pipeline(SecondaryNode('logit', nodes_from=[numeric_node, image_node, text_node]))
# combining all sources together
logit_node = SecondaryNode('logit', nodes_from=[image_node, numeric_node, text_node])
logit_node.custom_params = {'max_iter': 100000, 'random_state': 42}
pipeline = Pipeline(logit_node)

fit_data = MultiModalData({
'data_source_img/1': train_img,
'data_source_table/2': train_num,
'data_source_text/3': train_text
})
predict_data = MultiModalData({
'data_source_img/1': test_img,
'data_source_table/2': test_num,
'data_source_text/3': test_text
})
# train/test ratio
ratio = 0.6
if with_split:
fit_data, predict_data = train_test_data_setup(data, shuffle_flag=True, split_ratio=ratio)
else:
fit_data, predict_data = data, data

return pipeline, fit_data, predict_data


def run_multi_modal_pipeline(files_path, is_visualise=False):
def run_multi_modal_pipeline(files_path: str, is_visualise=False) -> float:
task = Task(TaskTypesEnum.classification)
images_size = (128, 128)

train_num, test_num, train_img, test_img, train_text, test_text = \
prepare_multi_modal_data(files_path, task, images_size)
data = prepare_multi_modal_data(files_path, task, images_size)

pipeline, fit_data, predict_data = generate_initial_pipeline_and_data(images_size,
train_num, test_num,
train_img, test_img,
train_text, test_text)
pipeline, fit_data, predict_data = generate_initial_pipeline_and_data(images_size, data,
with_split=True)

pipeline.fit(input_data=fit_data)

if is_visualise:
pipeline.show()

prediction = pipeline.predict(predict_data)
prediction = pipeline.predict(predict_data, output_mode='labels')

err = calculate_validation_metric(prediction, test_num)
err = calculate_validation_metric(predict_data, prediction)

print(f'ROC AUC for validation sample is {err}')
print(f'F1 micro for validation sample is {err}')

return err

Expand Down
Binary file modified examples/data/multimodal.tar.gz
Binary file not shown.
Loading

0 comments on commit 92aede4

Please sign in to comment.