Skip to content

Commit

Permalink
Fixed #22 Removed hardcoded logback logger, removed verbose flag from methods
Browse files Browse the repository at this point in the history
  • Loading branch information
zaleslaw committed Jan 15, 2021
1 parent e0f8e35 commit 7dd6ff1
Show file tree
Hide file tree
Showing 35 changed files with 111 additions and 131 deletions.
4 changes: 2 additions & 2 deletions api/build.gradle
Original file line number Diff line number Diff line change
Expand Up @@ -7,10 +7,10 @@ dependencies {
implementation "org.jetbrains.kotlin:kotlin-stdlib-jdk8"
compile group: 'org.tensorflow', name: 'tensorflow', version: '1.15.0'
compile 'com.github.doyaaaaaken:kotlin-csv-jvm:0.7.3' // for csv parsing
compile 'io.github.microutils:kotlin-logging:1.7.9' // for logging
compile 'ch.qos.logback:logback-classic:1.2.3'
compile 'io.github.microutils:kotlin-logging:2.0.4' // for logging
compile 'io.jhdf:jhdf:0.5.7' // for hdf5 parsing
compile 'com.beust:klaxon:5.0.1'
testCompile 'ch.qos.logback:logback-classic:1.2.3'
testCompile 'org.junit.jupiter:junit-jupiter-api:5.5.2'
testCompile 'org.junit.jupiter:junit-jupiter-engine:5.5.2'
testCompile 'org.junit.jupiter:junit-jupiter-params:5.5.2'
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@

package org.jetbrains.kotlinx.dl.api.core

import ch.qos.logback.classic.Level
import mu.KLogger
import mu.KotlinLogging
import org.jetbrains.kotlinx.dl.api.core.callback.Callback
Expand Down Expand Up @@ -299,11 +298,9 @@ public class Sequential(input: Input, vararg layers: Layer) : TrainableModel() {
validationDataset: Dataset,
epochs: Int,
trainBatchSize: Int,
validationBatchSize: Int,
verbose: Boolean
validationBatchSize: Int
): TrainingHistory {
return internalFit(
verbose,
trainBatchSize,
epochs,
trainingDataset,
Expand All @@ -316,11 +313,9 @@ public class Sequential(input: Input, vararg layers: Layer) : TrainableModel() {
override fun fit(
dataset: Dataset,
epochs: Int,
batchSize: Int,
verbose: Boolean
batchSize: Int
): TrainingHistory {
return internalFit(
verbose,
batchSize,
epochs,
dataset,
Expand Down Expand Up @@ -350,7 +345,6 @@ public class Sequential(input: Input, vararg layers: Layer) : TrainableModel() {
}

private fun internalFit(
verbose: Boolean,
trainBatchSize: Int,
epochs: Int,
trainingDataset: Dataset,
Expand All @@ -368,11 +362,6 @@ public class Sequential(input: Input, vararg layers: Layer) : TrainableModel() {

val trainingHistory = TrainingHistory()

this.isDebugMode = verbose
if (!isDebugMode) {
logger.level = Level.INFO
}

val metricOp = metric.apply(tf, prediction, yOp, numberOfLossesOp)

if (!isOptimizerVariableInitialized) {
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -26,9 +26,6 @@ import java.io.FileNotFoundException
* Base abstract class for all trainable models.
*/
public abstract class TrainableModel : InferenceModel() {
/** Controls level of verbosity. */
protected var isDebugMode: Boolean = false

/** Optimization algorithm required for compiling a model, and its learning rate. */
protected var optimizer: Optimizer = SGD(0.2f)

Expand Down Expand Up @@ -149,7 +146,6 @@ public abstract class TrainableModel : InferenceModel() {
* @param [dataset] The train dataset that combines input data (X) and target data (Y).
* @param [epochs] Number of epochs to train the model. An epoch is an iteration over the entire x and y data provided.
* @param [batchSize] Number of samples per gradient update.
* @param [verbose] Verbosity mode. False = silent, True = one line per batch and epoch.
* True (default) = Weights are initialized at the beginning of the training phase.
* False = Weights are not initialized during training phase. It should be initialized before (via transfer learning or init() method call).
*
Expand All @@ -158,8 +154,7 @@ public abstract class TrainableModel : InferenceModel() {
public abstract fun fit(
dataset: Dataset,
epochs: Int = 5,
batchSize: Int = 32,
verbose: Boolean = true
batchSize: Int = 32
): TrainingHistory

/**
Expand All @@ -170,7 +165,6 @@ public abstract class TrainableModel : InferenceModel() {
* @param [epochs] Number of epochs to train the model. An epoch is an iteration over the entire x and y data provided.
* @param [trainBatchSize] Number of samples per gradient update.
* @param [validationBatchSize] Number of samples per validation batch.
* @param [verbose] Verbosity mode. False = silent, True = one line per batch and epoch.
* True (default) = optimizer variables are initialized at the beginning of the training phase.
* False = optimizer variables are not initialized during training phase. It should be initialized before (via transfer learning).
*
Expand All @@ -181,8 +175,7 @@ public abstract class TrainableModel : InferenceModel() {
validationDataset: Dataset,
epochs: Int = 5,
trainBatchSize: Int = 32,
validationBatchSize: Int = 256,
verbose: Boolean = true
validationBatchSize: Int = 256
): TrainingHistory

/**
Expand Down Expand Up @@ -296,16 +289,14 @@ public abstract class TrainableModel : InferenceModel() {
* @param [epochs] Number of epochs to train the model. An epoch is an iteration over the entire x and y data provided.
* @param [trainBatchSize] Number of samples per gradient update.
* @param [validationBatchSize] Number of samples per validation batch.
* @param [verbose] Verbosity mode. False = silent, True = one line per batch and epoch.
* @return A [TrainingHistory] object. It contains records with training/validation loss values and metrics per each batch and epoch.
*/
public fun fit(
dataset: Dataset,
validationRate: Double,
epochs: Int,
trainBatchSize: Int,
validationBatchSize: Int,
verbose: Boolean
validationBatchSize: Int
): TrainingHistory {
require(validationRate > 0.0 && validationRate < 1.0) {
"Validation rate should be more than 0.0 and less than 1.0. " +
Expand All @@ -318,8 +309,7 @@ public abstract class TrainableModel : InferenceModel() {
validation,
epochs,
trainBatchSize,
validationBatchSize,
verbose
validationBatchSize
)
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,6 @@

package org.jetbrains.kotlinx.dl.api.inference

import ch.qos.logback.classic.Level
import ch.qos.logback.classic.Logger
import mu.KotlinLogging
import org.jetbrains.kotlinx.dl.api.core.KGraph
import org.jetbrains.kotlinx.dl.api.core.shape.TensorShape
Expand Down Expand Up @@ -57,13 +55,6 @@ public open class InferenceModel : AutoCloseable {
/** Logger. */
private val logger = KotlinLogging.logger {}

/** Logging level. */
protected var mu.KLogger.level: Level
get() = (logger.underlyingLogger as Logger).level
set(value) {
(underlyingLogger as Logger).level = value
}

public companion object {
/**
* Loads tensorflow graphs and variable data (if required).
Expand Down
11 changes: 0 additions & 11 deletions api/src/main/resources/logback.xml

This file was deleted.

Original file line number Diff line number Diff line change
Expand Up @@ -140,8 +140,7 @@ class InferenceModelTest {
it.fit(
dataset = train,
epochs = EPOCHS,
batchSize = TRAINING_BATCH_SIZE,
verbose = true
batchSize = TRAINING_BATCH_SIZE
)

val accuracy = it.evaluate(dataset = test, batchSize = TEST_BATCH_SIZE).metrics[Metrics.ACCURACY]
Expand Down Expand Up @@ -261,8 +260,7 @@ class InferenceModelTest {
it.fit(
dataset = train,
epochs = EPOCHS,
batchSize = TRAINING_BATCH_SIZE,
verbose = true
batchSize = TRAINING_BATCH_SIZE
)

val accuracy = it.evaluate(dataset = test, batchSize = TEST_BATCH_SIZE).metrics[Metrics.ACCURACY]
Expand Down Expand Up @@ -314,8 +312,7 @@ class InferenceModelTest {
it.fit(
dataset = train,
epochs = EPOCHS,
batchSize = TRAINING_BATCH_SIZE,
verbose = true
batchSize = TRAINING_BATCH_SIZE
)

val accuracy = it.evaluate(dataset = test, batchSize = TEST_BATCH_SIZE).metrics[Metrics.ACCURACY]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -141,7 +141,7 @@ internal class SequentialBasicTest : IntegrationTest() {
it.compile(optimizer = Adam(), loss = Losses.SOFT_MAX_CROSS_ENTROPY_WITH_LOGITS, metric = Accuracy())

val trainingHistory =
it.fit(dataset = train, epochs = EPOCHS, batchSize = TRAINING_BATCH_SIZE, verbose = false)
it.fit(dataset = train, epochs = EPOCHS, batchSize = TRAINING_BATCH_SIZE)

assertEquals(trainingHistory.batchHistory.size, 60)
assertEquals(1, trainingHistory.batchHistory[0].epochIndex)
Expand Down Expand Up @@ -219,8 +219,7 @@ internal class SequentialBasicTest : IntegrationTest() {
validationDataset = validation,
epochs = EPOCHS,
trainBatchSize = TRAINING_BATCH_SIZE,
validationBatchSize = TEST_BATCH_SIZE,
verbose = true
validationBatchSize = TEST_BATCH_SIZE
)

assertEquals(57, trainingHistory.batchHistory.size)
Expand Down Expand Up @@ -255,8 +254,7 @@ internal class SequentialBasicTest : IntegrationTest() {
it.fit(
dataset = train,
epochs = EPOCHS,
batchSize = TRAINING_BATCH_SIZE,
verbose = false
batchSize = TRAINING_BATCH_SIZE
)
}
assertEquals(
Expand Down Expand Up @@ -503,7 +501,7 @@ internal class SequentialBasicTest : IntegrationTest() {
it.summary()

val trainingHistory =
it.fit(dataset = train, epochs = EPOCHS, batchSize = TRAINING_BATCH_SIZE, verbose = true)
it.fit(dataset = train, epochs = EPOCHS, batchSize = TRAINING_BATCH_SIZE)

assertEquals(trainingHistory.batchHistory.size, 60)
assertEquals(1, trainingHistory.batchHistory[0].epochIndex)
Expand Down Expand Up @@ -592,7 +590,7 @@ internal class SequentialBasicTest : IntegrationTest() {
it.summary()

val trainingHistory =
it.fit(dataset = train, epochs = EPOCHS, batchSize = TRAINING_BATCH_SIZE, verbose = true)
it.fit(dataset = train, epochs = EPOCHS, batchSize = TRAINING_BATCH_SIZE)

assertEquals(trainingHistory.batchHistory.size, 60)
assertEquals(1, trainingHistory.batchHistory[0].epochIndex)
Expand Down Expand Up @@ -682,8 +680,7 @@ internal class SequentialBasicTest : IntegrationTest() {
it.fit(
dataset = train,
epochs = EPOCHS,
batchSize = TRAINING_BATCH_SIZE,
verbose = false
batchSize = TRAINING_BATCH_SIZE
)
}
assertEquals(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -129,8 +129,7 @@ class SequentialInferenceTest {
it.fit(
dataset = train,
epochs = EPOCHS,
batchSize = TRAINING_BATCH_SIZE,
verbose = true
batchSize = TRAINING_BATCH_SIZE
)

val accuracy = it.evaluate(dataset = test, batchSize = TEST_BATCH_SIZE).metrics[Metrics.ACCURACY]
Expand Down Expand Up @@ -223,8 +222,7 @@ class SequentialInferenceTest {
it.fit(
dataset = train,
epochs = EPOCHS,
batchSize = TRAINING_BATCH_SIZE,
verbose = true
batchSize = TRAINING_BATCH_SIZE
)

val accuracy = it.evaluate(dataset = test, batchSize = TEST_BATCH_SIZE).metrics[Metrics.ACCURACY]
Expand Down Expand Up @@ -260,8 +258,7 @@ class SequentialInferenceTest {
validationRate = 0.1,
epochs = EPOCHS,
trainBatchSize = TRAINING_BATCH_SIZE,
validationBatchSize = TEST_BATCH_SIZE,
verbose = false
validationBatchSize = TEST_BATCH_SIZE
)
}
assertEquals(
Expand Down Expand Up @@ -376,8 +373,7 @@ class SequentialInferenceTest {
it.fit(
dataset = train,
epochs = EPOCHS,
batchSize = TRAINING_BATCH_SIZE,
verbose = true
batchSize = TRAINING_BATCH_SIZE
)

val accuracy = it.evaluate(dataset = test, batchSize = TEST_BATCH_SIZE).metrics[Metrics.ACCURACY]
Expand Down Expand Up @@ -420,8 +416,7 @@ class SequentialInferenceTest {
validationRate = 0.1,
epochs = EPOCHS,
trainBatchSize = TRAINING_BATCH_SIZE,
validationBatchSize = TEST_BATCH_SIZE,
verbose = false
validationBatchSize = TEST_BATCH_SIZE
)

val accuracyAfterTraining = it.evaluate(dataset = test, batchSize = 100).metrics[Metrics.ACCURACY]
Expand Down Expand Up @@ -584,8 +579,7 @@ class SequentialInferenceTest {
validationDataset = validation,
epochs = EPOCHS,
trainBatchSize = TRAINING_BATCH_SIZE,
validationBatchSize = TEST_BATCH_SIZE,
verbose = true
validationBatchSize = TEST_BATCH_SIZE
)

it.save(
Expand Down Expand Up @@ -623,8 +617,7 @@ class SequentialInferenceTest {
validationRate = 0.1,
epochs = 1,
trainBatchSize = 1000,
validationBatchSize = 100,
verbose = true
validationBatchSize = 100
)

val accuracyAfterTraining = it.evaluate(dataset = test, batchSize = 100).metrics[Metrics.ACCURACY]
Expand All @@ -649,8 +642,7 @@ class SequentialInferenceTest {
validationRate = 0.1,
epochs = 1,
trainBatchSize = 1000,
validationBatchSize = 100,
verbose = true
validationBatchSize = 100
)

val accuracyAfterTraining = it.evaluate(dataset = test, batchSize = 100).metrics[Metrics.ACCURACY]
Expand Down
Loading

0 comments on commit 7dd6ff1

Please sign in to comment.