From 6db96d42ffd9168e753c2f50e4dafdc452ae43bf Mon Sep 17 00:00:00 2001
From: carloderamo <carlo.deramo@gmail.com>
Date: Tue, 2 Feb 2021 17:02:46 +0100
Subject: [PATCH] Small updates in docs

---
 docs/conf.py                                  |  2 +-
 docs/index.rst                                | 19 ++++++++++---------
 .../tutorials/tutorials.0_experiments.rst     |  2 +-
 3 files changed, 12 insertions(+), 11 deletions(-)

diff --git a/docs/conf.py b/docs/conf.py
index cf014f77d..2070b6f5a 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -55,7 +55,7 @@
 
 # General information about the project.
 project = u'MushroomRL'
-copyright = u'2018, 2019, 2020 Carlo D\'Eramo, Davide Tateo'
+copyright = u'2018, 2019, 2020, 2021 Carlo D\'Eramo, Davide Tateo'
 author = u'Carlo D\'Eramo'
 
 # The version info for the project you're documenting, acts as replacement for
diff --git a/docs/index.rst b/docs/index.rst
index bdd2b814c..07a57339a 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -13,16 +13,17 @@ Reinforcement Learning python library
 .. highlight:: python
 
 MushroomRL is a Reinforcement Learning (RL) library that aims to be a simple, yet
-powerful way to make **RL** and **deep RL** experiments. The idea behind Mushroom
-consists in offering the majority of RL algorithms providing a common interface
+powerful way to make **RL** and **deep RL** experiments. The idea behind MushroomRL
+is to offer the majority of RL algorithms, providing a common interface
 in order to run them without excessive effort. Moreover, it is designed in such
-a way that new algorithms and other stuff can generally be added transparently
-without the need of editing other parts of the code. MushroomRL makes a large use
-of the environments provided by
+a way that new algorithms and other components can be added transparently,
+without the need to edit other parts of the code. MushroomRL is compatible with
+environment libraries such as
 `OpenAI Gym <https://gym.openai.com/>`_,
 `DeepMind Control Suite <https://github.com/deepmind/dm_control>`_ and
-`MuJoCo <http://www.mujoco.org/>`_ libraries, and
-the `PyTorch <https://pytorch.org>`_ library for tensor computation.
+`MuJoCo <http://www.mujoco.org/>`_, and with
+the `PyTorch <https://pytorch.org>`_ and `TensorFlow <https://www.tensorflow.org/>`_
+libraries for tensor computation.
 
 With MushroomRL you can:
 
@@ -31,8 +32,8 @@ With MushroomRL you can:
 - use all RL environments offered by well-known libraries and build customized
   environments as well;
 - exploit regression models offered by Scikit-Learn or build a customized one
-  with PyTorch;
-- run experiments on GPU.
+  with PyTorch or TensorFlow;
+- seamlessly run experiments on CPU or GPU.
 
 Basic run example
 -----------------
diff --git a/docs/source/tutorials/tutorials.0_experiments.rst b/docs/source/tutorials/tutorials.0_experiments.rst
index 75da43ed0..ba26c65fa 100644
--- a/docs/source/tutorials/tutorials.0_experiments.rst
+++ b/docs/source/tutorials/tutorials.0_experiments.rst
@@ -29,7 +29,7 @@ state and action spaces. An agent can be defined this way:
 .. literalinclude:: code/simple_experiment.py
    :lines: 13-27
 
-This piece of code creates the policy followed by the agent (e.g. :math:`\epsilon`-greedy)
+This piece of code creates the policy followed by the agent (e.g. :math:`\varepsilon`-greedy)
 with :math:`\varepsilon = 1`. Then, the policy approximator is created specifying the
 parameters to create it and the class (in this case, the ``ExtraTreesRegressor`` class
 of scikit-learn is used). Eventually, the agent is created calling the algorithm
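
For reference, a minimal sketch of the agent construction described in the tutorial
hunk above: an epsilon-greedy policy with epsilon = 1, an ``ExtraTreesRegressor``
approximator, and an agent built by calling the algorithm class. It assumes the
``mushroom_rl`` package layout and an FQI setup on an assumed ``CarOnHill``
environment; the class names and parameter values below are illustrative assumptions,
not lines taken from ``simple_experiment.py``::

    from sklearn.ensemble import ExtraTreesRegressor

    from mushroom_rl.algorithms.value import FQI
    from mushroom_rl.environments import CarOnHill
    from mushroom_rl.policy import EpsGreedy
    from mushroom_rl.utils.parameters import Parameter

    # Environment providing the state and action space information (assumed example).
    mdp = CarOnHill()

    # epsilon-greedy policy with epsilon = 1, i.e. fully explorative at the start.
    epsilon = Parameter(value=1.)
    pi = EpsGreedy(epsilon=epsilon)

    # Parameters forwarded to the scikit-learn ExtraTreesRegressor approximator.
    approximator_params = dict(input_shape=mdp.info.observation_space.shape,
                               n_actions=mdp.info.action_space.n,
                               n_estimators=50,
                               min_samples_split=5,
                               min_samples_leaf=2)

    # The agent is created by calling the algorithm class (here FQI) with the
    # MDP information, the policy and the approximator class.
    agent = FQI(mdp.info, pi, ExtraTreesRegressor,
                n_iterations=20, approximator_params=approximator_params)

Training would then typically proceed by coupling ``agent`` and ``mdp`` in a ``Core``
object and calling its ``learn`` and ``evaluate`` methods, as the rest of the tutorial
describes.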