Commit aaa21d4: add logging
alvaromc317 committed Aug 22, 2020
1 parent ec96cce commit aaa21d4
Showing 4 changed files with 35 additions and 30 deletions.
39 changes: 20 additions & 19 deletions asgl/asgl.py
@@ -1,3 +1,4 @@
+import sys
import functools
import itertools
import logging
@@ -7,7 +8,7 @@
import numpy as np
from sklearn.metrics import mean_absolute_error, median_absolute_error, mean_squared_error

-logger = logging.getLogger(__name__)
+logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class ASGL:
@@ -67,7 +68,7 @@ def __model_checker(self):
if self.model in self.valid_models:
return True
else:
-            logger.error(f'{self.model} is not a valid model. Valid models are {self.valid_models}')
+            logging.error(f'{self.model} is not a valid model. Valid models are {self.valid_models}')
return False

def __penalization_checker(self):
@@ -83,8 +84,8 @@ def __penalization_checker(self):
if (self.penalization in self.valid_penalizations) or (self.penalization is None):
return True
else:
-            logger.error(f'{self.penalization} is not a valid penalization. '
-                         f'Valid penalizations are {self.valid_penalizations} or None')
+            logging.error(f'{self.penalization} is not a valid penalization. '
+                          f'Valid penalizations are {self.valid_penalizations} or None')
return False

def __dtype_checker(self):
@@ -182,7 +183,7 @@ def __preprocessing_itertools_param(self, lambda_vector, alpha_vector, lasso_wei
param = itertools.product(lambda_vector, alpha_vector, lasso_weights_list, gl_weights_list)
else:
param = None
-            logger.error(f'Error preprocessing input parameters')
+            logging.error(f'Error preprocessing input parameters')
param = list(param)
return param

@@ -193,7 +194,7 @@ def __preprocessing(self):
"""
# Run the input_checker to verify that the inputs have the correct format
if self.__input_checker() is False:
-            logger.error('incorrect input parameters')
+            logging.error('incorrect input parameters')
raise ValueError('incorrect input parameters')
# Defines param as None for the unpenalized model
if self.penalization is None:
@@ -270,7 +271,7 @@ def unpenalized_solver(self, x, y):
except (ValueError, cvxpy.error.SolverError):
continue
if problem.status in ["infeasible", "unbounded"]:
-            logger.warning('Optimization problem status failure')
+            logging.warning('Optimization problem status failure')
beta_sol = beta_var.value
beta_sol[np.abs(beta_sol) < self.tol] = 0
return [beta_sol]
@@ -316,11 +317,11 @@ def lasso(self, x, y, param):
except (ValueError, cvxpy.error.SolverError):
continue
if problem.status in ["infeasible", "unbounded"]:
-            logger.warning('Optimization problem status failure')
+            logging.warning('Optimization problem status failure')
beta_sol = beta_var.value
beta_sol[np.abs(beta_sol) < self.tol] = 0
beta_sol_list.append(beta_sol)
-        logger.debug('Function finished without errors')
+        logging.debug('Function finished without errors')
return beta_sol_list

def gl(self, x, y, group_index, param):
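Every penalized solver in this file (`lasso` above, and `gl`, `sgl`, `alasso`, `agl`, `asgl` below) shares the loop visible in these hunks: try a list of cvxpy solvers in turn, skip any that raise, warn when the final problem status is infeasible or unbounded, and hard-threshold near-zero coefficients. A simplified, self-contained sketch of that pattern; the helper name and the solver list are illustrative, and it assumes at least one solver succeeds:

```python
import logging
import cvxpy
import numpy as np

def solve_with_fallback(problem, beta_var, solvers=('ECOS', 'OSQP', 'SCS'), tol=1e-5):
    # Try each solver in turn; a failing solver raises ValueError or
    # cvxpy.error.SolverError, in which case we move on to the next one.
    for solver in solvers:
        try:
            problem.solve(solver=solver)
            break  # first solver that runs to completion wins
        except (ValueError, cvxpy.error.SolverError):
            continue
    if problem.status in ['infeasible', 'unbounded']:
        logging.warning('Optimization problem status failure')
    beta_sol = beta_var.value              # fitted coefficients (None if all solvers failed)
    beta_sol[np.abs(beta_sol) < tol] = 0   # hard-threshold numerically negligible betas
    return beta_sol
```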
@@ -376,7 +377,7 @@ def gl(self, x, y, group_index, param):
except (ValueError, cvxpy.error.SolverError):
continue
if problem.status in ["infeasible", "unbounded"]:
-            logger.warning('Optimization problem status failure')
+            logging.warning('Optimization problem status failure')
beta_sol = np.concatenate([b.value for b in beta_var], axis=0)
beta_sol[np.abs(beta_sol) < self.tol] = 0
beta_sol_list.append(beta_sol)
@@ -439,7 +440,7 @@ def sgl(self, x, y, group_index, param):
except (ValueError, cvxpy.error.SolverError):
continue
if problem.status in ["infeasible", "unbounded"]:
-            logger.warning('Optimization problem status failure')
+            logging.warning('Optimization problem status failure')
beta_sol = np.concatenate([b.value for b in beta_var], axis=0)
beta_sol[np.abs(beta_sol) < self.tol] = 0
beta_sol_list.append(beta_sol)
@@ -486,11 +487,11 @@ def alasso(self, x, y, param):
except (ValueError, cvxpy.error.SolverError):
continue
if problem.status in ["infeasible", "unbounded"]:
-            logger.warning('Optimization problem status failure')
+            logging.warning('Optimization problem status failure')
beta_sol = beta_var.value
beta_sol[np.abs(beta_sol) < self.tol] = 0
beta_sol_list.append(beta_sol)
-        logger.debug('Function finished without errors')
+        logging.debug('Function finished without errors')
return beta_sol_list

def agl(self, x, y, group_index, param):
@@ -546,7 +547,7 @@ def agl(self, x, y, group_index, param):
except (ValueError, cvxpy.error.SolverError):
continue
if problem.status in ["infeasible", "unbounded"]:
-            logger.warning('Optimization problem status failure')
+            logging.warning('Optimization problem status failure')
beta_sol = np.concatenate([b.value for b in beta_var], axis=0)
beta_sol[np.abs(beta_sol) < self.tol] = 0
beta_sol_list.append(beta_sol)
@@ -611,7 +612,7 @@ def asgl(self, x, y, group_index, param):
except (ValueError, cvxpy.error.SolverError):
continue
if problem.status in ["infeasible", "unbounded"]:
-            logger.warning('Optimization problem status failure')
+            logging.warning('Optimization problem status failure')
beta_sol = np.concatenate([b.value for b in beta_var], axis=0)
beta_sol[np.abs(beta_sol) < self.tol] = 0
beta_sol_list.append(beta_sol)
@@ -695,7 +696,7 @@ def predict(self, x_new):
if self.intercept:
x_new = np.c_[np.ones(x_new.shape[0]), x_new]
if x_new.shape[1] != len(self.coef_[0]):
-            logger.error('Model dimension and new data dimension does not match')
+            logging.error('Model dimension and new data dimension does not match')
raise ValueError('Model dimension and new data dimension does not match')
# Store predictions in a list
prediction_list = []
@@ -717,7 +718,7 @@ def _num_parameters(self):
"""
# Run the input_checker to verify that the inputs have the correct format
if self.__input_checker() is False:
-            logger.error('incorrect input parameters')
+            logging.error('incorrect input parameters')
raise ValueError('incorrect input parameters')
if self.penalization is None:
# See meaning of each element in the "else" result statement.
@@ -755,7 +756,7 @@ def _retrieve_parameters_idx(self, param_index):
if param_index > n_models:
string = f'param_index should be smaller or equal than the number of models solved. n_models={n_models}, ' \
f'param_index={param_index}'
-            logger.error(string)
+            logging.error(string)
raise ValueError(string)
# If penalization is None, all parameters are set to None
if self.penalization is None:
@@ -822,7 +823,7 @@ def error_calculator(y_true, prediction_list, error_type="MSE", tau=None):
if error_type not in valid_error_types:
raise ValueError(f'invalid error type. Valid error types are {error_dict.keys()}')
if y_true.shape[0] != len(prediction_list[0]):
-            logger.error('Dimension of test data does not match dimension of prediction')
+            logging.error('Dimension of test data does not match dimension of prediction')
raise ValueError('Dimension of test data does not match dimension of prediction')
# For each prediction, store the error associated to that prediction in a list
error_list = []
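The change running through `asgl.py` drops the per-module `logger = logging.getLogger(__name__)` object in favour of a single `logging.basicConfig(...)` call against the root logger, so every message is written to stdout with a timestamp. A minimal standalone sketch of the behaviour this configures (the example messages are taken from the diff; the script itself is illustrative):

```python
import logging
import sys

# The configuration added by this commit: root logger, stdout stream,
# INFO threshold, timestamped record format.
logging.basicConfig(stream=sys.stdout, level=logging.INFO,
                    format='%(asctime)s - %(levelname)s - %(message)s')

logging.error('incorrect input parameters')             # printed: ERROR passes the INFO filter
logging.warning('Optimization problem status failure')  # printed: WARNING passes the INFO filter
logging.debug('Function finished without errors')       # suppressed: DEBUG is below INFO
```

Note that the `logging.debug(...)` calls kept in `lasso` and `alasso` fall below the configured INFO level, so they produce no output unless the level is lowered.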
15 changes: 9 additions & 6 deletions asgl/cv.py
@@ -1,12 +1,13 @@
import logging
+import sys

import numpy as np
from sklearn.model_selection import KFold

from . import asgl
from . import weights

-logger = logging.getLogger(__name__)
+logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class CvGeneralClass(asgl.ASGL):
@@ -73,8 +74,9 @@ def __init__(self, model, penalization, intercept=True, tol=1e-5, lambda1=1, alp
"""
# CvGeneralClass
super().__init__(model, penalization, intercept, tol, lambda1, alpha, tau, lasso_weights, gl_weights, parallel,
-                         num_cores, solver, max_iters, weight_technique, weight_tol, lasso_power_weight, gl_power_weight,
-                         variability_pct, lambda1_weights, spca_alpha, spca_ridge_alpha, error_type, random_state)
+                         num_cores, solver, max_iters, weight_technique, weight_tol, lasso_power_weight,
+                         gl_power_weight, variability_pct, lambda1_weights, spca_alpha, spca_ridge_alpha, error_type,
+                         random_state)
# Relative to cross validation / train validate / test
self.nfolds = nfolds

@@ -122,8 +124,9 @@ def __init__(self, model, penalization, intercept=True, tol=1e-5, lambda1=1, alp
random_state=None, train_pct=0.05, validate_pct=0.05, train_size=None, validate_size=None):

super().__init__(model, penalization, intercept, tol, lambda1, alpha, tau, lasso_weights, gl_weights, parallel,
-                         num_cores, solver, max_iters, weight_technique, weight_tol, lasso_power_weight, gl_power_weight,
-                         variability_pct, lambda1_weights, spca_alpha, spca_ridge_alpha, error_type, random_state)
+                         num_cores, solver, max_iters, weight_technique, weight_tol, lasso_power_weight,
+                         gl_power_weight, variability_pct, lambda1_weights, spca_alpha, spca_ridge_alpha, error_type,
+                         random_state)
# Relative to / train validate / test
self.train_pct = train_pct
self.validate_pct = validate_pct
@@ -195,7 +198,7 @@ def train_test_split(nrows, train_size=None, train_pct=0.7, random_state=None):
train_size = int(round(nrows * train_pct))
# Check that nrows is larger than train_size
if nrows < train_size:
-        logger.error(f'Train size is too large. Input number of rows:{nrows}, current train_size: {train_size}')
+        logging.error(f'Train size is too large. Input number of rows:{nrows}, current train_size: {train_size}')
# List of 2 elements of size train_size, remaining_size (test)
split_index = np.split(data_index, [train_size])
train_idx, test_idx = [elt for elt in split_index]
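`train_test_split` in `cv.py` builds index arrays with `np.split` rather than copying data. A simplified sketch of that logic; the shuffling step is assumed (it sits outside the hunk), while the size computation, the guard, and the split come from the lines shown:

```python
import logging
import numpy as np

def train_test_split(nrows, train_size=None, train_pct=0.7, random_state=None):
    # Assumed setup: a reproducible shuffle of the row indices
    rng = np.random.default_rng(random_state)
    data_index = rng.permutation(nrows)
    if train_size is None:
        train_size = int(round(nrows * train_pct))
    if nrows < train_size:
        logging.error(f'Train size is too large. Input number of rows:{nrows}, '
                      f'current train_size: {train_size}')
    # np.split at position train_size yields [first train_size indices, the rest]
    train_idx, test_idx = np.split(data_index, [train_size])
    return train_idx, test_idx

train_idx, test_idx = train_test_split(nrows=100, train_pct=0.7, random_state=42)
```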
7 changes: 4 additions & 3 deletions asgl/weights.py
@@ -1,4 +1,5 @@
import logging
+import sys

import numpy as np
from sklearn.cross_decomposition import PLSRegression
Expand All @@ -7,7 +8,7 @@

from .asgl import ASGL

-logger = logging.getLogger(__name__)
+logging.basicConfig(stream=sys.stdout, level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s')


class WEIGHTS:
@@ -200,6 +201,6 @@ def fit(self, x, y=None, group_index=None):
else:
lasso_weights = None
gl_weights = None
-            logger.error(f'Not a valid penalization for weight calculation. Valid penalizations '
-                         f'are {self.valid_penalizations}')
+            logging.error(f'Not a valid penalization for weight calculation. Valid penalizations '
+                          f'are {self.valid_penalizations}')
return lasso_weights, gl_weights
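With this commit, `asgl.py`, `cv.py`, and `weights.py` all call `logging.basicConfig(...)` at import time. `basicConfig` is a no-op once the root logger already has handlers, so whichever module is imported first fixes the configuration for the whole package, and any application that imports `asgl` inherits that root-logger setup. A small sketch of the first-call-wins behaviour (the messages are illustrative):

```python
import logging
import sys

logging.basicConfig(stream=sys.stdout, level=logging.INFO)   # takes effect: root has no handlers yet
logging.basicConfig(stream=sys.stderr, level=logging.DEBUG)  # silently ignored: root already configured

logging.debug('hidden')   # still filtered out; the second call changed nothing
logging.info('visible')   # emitted to stdout using the first configuration
```

Libraries more commonly leave configuration to the application and log through `logging.getLogger(__name__)`, the pattern this commit removes.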
4 changes: 2 additions & 2 deletions setup.py
@@ -9,13 +9,13 @@

setup(
name='asgl',
-    version='v1.0.1',
+    version='v1.0.2',
author='Alvaro Mendez Civieta',
author_email='[email protected]',
license='GNU General Public License',
zip_safe=False,
url='https://github.com/alvaromc317/asgl',
-    dowload_url='https://github.com/alvaromc317/asgl/archive/v1.0.1.tar.gz',
+    dowload_url='https://github.com/alvaromc317/asgl/archive/v1.0.2.tar.gz',
description='A regression solver for linear and quantile regression models and lasso based penalizations',
long_description=long_description,
long_description_content_type='text/markdown',
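The `setup.py` hunk bumps the version string and the archive URL in lockstep; note also that `dowload_url` appears to be a typo of setuptools' `download_url`, so the field is likely ignored by packaging tools. One way to keep the two values from drifting apart is to single-source the version (a sketch, not the repository's actual layout):

```python
from setuptools import setup

VERSION = '1.0.2'  # bump the release number in exactly one place

setup(
    name='asgl',
    version=VERSION,
    url='https://github.com/alvaromc317/asgl',
    # Derive the archive URL from the same constant so it can never lag behind
    download_url=f'https://github.com/alvaromc317/asgl/archive/v{VERSION}.tar.gz',
)
```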
