
Commit

javadoc updates
maniospas committed Aug 26, 2024
1 parent 0404df0 commit b6bd5fc
Showing 412 changed files with 23,657 additions and 3,830 deletions.
2 changes: 1 addition & 1 deletion JGNN/src/examples/nodeClassification/GCN.java
@@ -13,7 +13,7 @@
import mklab.JGNN.nn.Model;
import mklab.JGNN.core.Slice;
import mklab.JGNN.core.Tensor;
-import mklab.JGNN.core.empy.EmptyTensor;
+import mklab.JGNN.core.empty.EmptyTensor;
import mklab.JGNN.nn.initializers.XavierNormal;
import mklab.JGNN.nn.loss.Accuracy;
import mklab.JGNN.nn.loss.CategoricalCrossEntropy;
2 changes: 1 addition & 1 deletion JGNN/src/examples/nodeClassification/Scripting.java
@@ -12,7 +12,7 @@
import mklab.JGNN.nn.Model;
import mklab.JGNN.core.Slice;
import mklab.JGNN.core.Tensor;
-import mklab.JGNN.core.empy.EmptyTensor;
+import mklab.JGNN.core.empty.EmptyTensor;
import mklab.JGNN.nn.initializers.XavierNormal;
import mklab.JGNN.nn.loss.Accuracy;
import mklab.JGNN.nn.loss.CategoricalCrossEntropy;
2 changes: 1 addition & 1 deletion JGNN/src/examples/tutorial/Quickstart.java
@@ -11,7 +11,7 @@
import mklab.JGNN.nn.Model;
import mklab.JGNN.core.Slice;
import mklab.JGNN.core.Tensor;
-import mklab.JGNN.core.empy.EmptyTensor;
+import mklab.JGNN.core.empty.EmptyTensor;
import mklab.JGNN.nn.initializers.XavierNormal;
import mklab.JGNN.nn.loss.Accuracy;
import mklab.JGNN.nn.loss.CategoricalCrossEntropy;
47 changes: 37 additions & 10 deletions JGNN/src/main/java/mklab/JGNN/adhoc/ModelTraining.java
@@ -38,6 +38,7 @@ public ModelTraining() {

/**
* @param verbose Whether an error message will be printed.
* @return The model training instance.
* @deprecated This method was available in earlier JGNN versions but will be
* gradually phased out. Instead, wrap the validation loss within
* {@link mklab.JGNN.nn.loss.report.VerboseLoss} to replicate the
@@ -54,16 +55,37 @@ public ModelTraining setVerbose(boolean verbose) {
}
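A usage sketch of the replacement that the deprecated javadoc above suggests (not part of this commit; the VerboseLoss constructor wrapping a base loss is assumed):

    // instead of the deprecated training.setVerbose(true), report progress through the validation loss
    training.setValidationLoss(new VerboseLoss(new CategoricalCrossEntropy()));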

/**
* Set
* Sets which {@link mklab.JGNN.nn.Loss} should be applied on training batches
* (the loss is averaged across batches, but is aggregated as a sum within each
* batch by {@link BatchOptimizer}). Model training mainly uses the loss's
* {@link mklab.JGNN.nn.Loss#derivative(Tensor, Tensor)} method, alongside
* {@link mklab.JGNN.nn.Loss#onEndEpoch()} and
* {@link mklab.JGNN.nn.Loss#onEndTraining()}. If no validation loss is set,
* the training loss is also used for validation.
*
* @param loss
* @return
* @param loss The loss's instance.
* @return The model training instance.
* @see #setValidationLoss(Loss)
*/
public ModelTraining setLoss(Loss loss) {
this.loss = loss;
return this;
}

/**
* Sets which {@link mklab.JGNN.nn.Loss} should be applied on validation data on
* each epoch. The loss's {@link mklab.JGNN.nn.Loss#onEndEpoch()},
* {@link mklab.JGNN.nn.Loss#onEndTraining()}, and
* {@link mklab.JGNN.nn.Loss#evaluate(Tensor, Tensor)} methods are used. In the
* case where validation is split into multiple instances of batch data, which
* may be necessary for complex scenarios like graph classification, the loss
* value is averaged across those batches. The methods mentioned above are not
* used by losses employed in training.
*
* @param loss The loss's instance.
* @return The model training instance.
* @see #setLoss(Loss)
*/
public ModelTraining setValidationLoss(Loss loss) {
this.validationLoss = loss;
return this;
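A minimal sketch combining the two setters above (not part of this commit; assumes a ModelTraining subclass such as AGFTraining with a no-argument constructor):

    import mklab.JGNN.adhoc.train.AGFTraining;
    import mklab.JGNN.nn.loss.Accuracy;
    import mklab.JGNN.nn.loss.CategoricalCrossEntropy;

    ModelTraining training = new AGFTraining()
            .setLoss(new CategoricalCrossEntropy()) // computes gradients on training batches
            .setValidationLoss(new Accuracy());     // evaluated on validation data at the end of each epoch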
@@ -73,7 +95,10 @@ public ModelTraining setValidationLoss(Loss loss) {
* Sets an {@link Optimizer} instance to control parameter updates during
* training. If the provided optimizer is not an instance of
* {@link BatchOptimizer}, it is forcefully wrapped by the latter. Training
* calls the batch optimizer's update method after every batch.
* calls the batch optimizer's update method after every batch. Each batch could
* contain multiple instances of batch data. However, the total number of
* applied gradient updates is always equal to the value set by
* {@link #setNumBatches(int)}.
*
* @param optimizer The desired optimizer.
* @return <code>this</code> model training instance.
@@ -101,10 +126,9 @@ public ModelTraining setNumBatches(int numBatches) {
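A sketch of the optimizer-related setters (not part of this commit; the setOptimizer name, the Adam class, and its learning-rate constructor are assumed):

    training.setOptimizer(new Adam(0.01)) // wrapped into a BatchOptimizer if it is not one already
            .setNumBatches(10);           // training data are split into 10 batches per epoch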

/**
* Sets whether the training strategy should reflect stochastic gradient descent
* by randomly sampling from the training dataset to obtain data samples. If
* <code>true</code>, both this feature and acceptable thread-based
* paralellization is enabled. Parallelization makes use of JGNN's
* {@link ThreadPool}.
* by randomly sampling from the training data samples. If <code>true</code>,
* both this feature and thread-based parallelization are enabled.
* Parallelization uses JGNN's {@link ThreadPool}.
*
* @param paralellization A boolean value indicating whether this feature is
* enabled.
@@ -151,7 +175,8 @@ public ModelTraining setPatience(int patience) {
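A sketch of the two setters around this hunk (not part of this commit; the exact name of the parallelized stochastic gradient descent setter is assumed, since it is not visible here):

    training.setParallelizedStochasticGradientDescent(true) // sample batches randomly and run them on JGNN's ThreadPool
            .setPatience(50);                               // early-stopping patience, counted in epochs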
* This is a leftover method from an earlier version of JGNN's interface. For
* the time being, there is no good alternative, but it will be phased out.
*
* @deprecated This method's full implementation has been moved to {@link #train(Model)}
* @deprecated This method's full implementation has been moved to
* {@link #train(Model)}
*/
public Model train(Model model, Matrix features, Matrix labels, Slice trainingSamples, Slice validationSamples) {
throw new RuntimeException(
@@ -234,7 +259,7 @@ public Model train(Model model) {
Runnable batchCode = new Runnable() {
@Override
public void run() {
for (BatchData batchData : getBatchData(batchId, epochId))
for (BatchData batchData : getBatchData(batchId, epochId))
model.train(loss, optimizer, batchData.getInputs(), batchData.getOutputs());
if (stochasticGradientDescent)
optimizer.updateAll();
@@ -271,13 +296,15 @@ public void run() {

if (verbose)
System.out.println("Epoch " + epoch + " with loss " + totalLoss);
loss.onEndEpoch();
validLoss.onEndEpoch();
currentPatience -= 1;
if (currentPatience == 0)
break;
}
for (Parameter parameter : model.getParameters())
parameter.set(minLossParameters.get(parameter));
loss.onEndTraining();
validLoss.onEndTraining();
onEndTraining();
return model;
@@ -1,7 +1,7 @@
/**
* Contains datasets for out-of-the-box experimentation. When run
* for the first time, the datasets also download their data in a
* <i>downloads/<i> directory in the running path.
* Contains datasets for out-of-the-box experimentation. When run for the first
* time, the datasets also download their data in a <i>downloads&#47;</i>
* directory in the running path.
*
* @author Emmanouil Krasanakis
*/
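A sketch of the download behavior described above (not part of this commit; the Cora dataset class and the accessor names are assumed):

    import mklab.JGNN.adhoc.Dataset;
    import mklab.JGNN.adhoc.datasets.Cora;
    import mklab.JGNN.core.Matrix;

    Dataset dataset = new Cora();          // downloads its files into the downloads/ directory on first use
    Matrix features = dataset.features();  // node features
    Matrix labels = dataset.labels();      // one-hot node labels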
11 changes: 6 additions & 5 deletions JGNN/src/main/java/mklab/JGNN/adhoc/train/AGFTraining.java
@@ -12,11 +12,12 @@
import mklab.JGNN.core.util.Range;

/**
* Extends the {@link ModelTraining} class to be able to train {@link Model}
* instances for attributed graph functions (AGFs). Training needs to account
* for a list of graphs, corresponding graph node features, and corresponding
* graph labels. Each label holds the one-hot encoding of each graph. Fill data
* with the method {@link #addGraph(Matrix, Matrix, Tensor)}.
* Extends the {@link ModelTraining} class to be able to train
* {@link mklab.JGNN.nn.Model} instances for attributed graph functions (AGFs).
* Training needs to account for a list of graphs, corresponding graph node
* features, and corresponding graph labels. Each label holds the one-hot
* encoding of each graph. Fill data with the method
* {@link #addGraph(Matrix, Matrix, Tensor)}.
*
* @author Emmanouil Krasanakis
*/
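A sketch of filling AGF training data as described above (not part of this commit; the list variables are illustrative and a no-argument constructor is assumed):

    AGFTraining training = new AGFTraining();
    for (int g = 0; g < graphs.size(); g++)
        training.addGraph(graphs.get(g),      // adjacency matrix of graph g
                nodeFeatures.get(g),          // node feature matrix of graph g
                graphLabels.get(g));          // one-hot label tensor of graph g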
@@ -10,13 +10,13 @@
import mklab.JGNN.core.matrix.WrapRows;

/**
* Extends the {@link ModelTraining} class to train {@link Model} instances from
* feature and label matrices. This is a generic classification scheme that also
* supports (and follows the training data flow of) traditional neural networks
* that can produce batch predictions. If the model is a GNN, it is assumed that
* it contains the graph adjacency matrix and the node features as constants,
* and its input are node identifiers. This scheme is automated under
* {@link mklab.JGNN.adhoc.parsers.FastBuilder#classify()}. In this case,
* Extends the {@link ModelTraining} class to train {@link mklab.JGNN.nn.Model}
* instances from feature and label matrices. This is a generic classification
* scheme that also supports (and follows the training data flow of) traditional
* neural networks that can produce batch predictions. If the model is a GNN, it
* is assumed that it contains the graph adjacency matrix and the node features
* as constants, and its inputs are node identifiers. This scheme is automated
* under {@link mklab.JGNN.adhoc.parsers.FastBuilder#classify()}. In this case,
* classification features should be an identity vertical matrix of node
* identifiers; in the simplest case a vertical matrix organization of
* [0,1,2,3,4,...]. Labels should be a matrix with predictions for corresponding
Expand Down
18 changes: 11 additions & 7 deletions JGNN/src/main/java/mklab/JGNN/core/Tensor.java
@@ -113,8 +113,8 @@ public void assertFinite() {

/**
* If supported by the subclassed tensor, invalidates calls to
* {@link #release()} so that memory is a de-allocated only when object
* references expire.
* {@link #release()} so that memory is de-allocated only when object references
* expire.
*
* @see #release()
* @deprecated This method may not be present in future versions of the library,
@@ -126,11 +126,12 @@
* Assign a value to a tensor element. All tensor operations use this function
* to wrap element assignments.
*
* @param pos The position of the tensor element
* @param value The value to assign
* @throws RuntimeException If the value is NaN or the element position is less
* than 0 or greater than {@link #size()}-1.
* @param pos The position of the tensor element.
* @param value The value to assign.
* @throws RuntimeException If the element position is less than 0 or greater
* than {@link #size()}-1.
* @return <code>this</code> Tensor instance.
* @see #put(int, double)
*/
public abstract Tensor put(long pos, double value);

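A short sketch of element assignment (not part of this commit; DenseTensor from mklab.JGNN.core.tensor is assumed as the concrete Tensor subclass):

    Tensor t = new DenseTensor(5); // five elements, all zero
    t.put(0, 1.0).put(4, -2.5);    // put returns this tensor, so calls can chain
    // t.put(5, 0.0) would throw a RuntimeException because 5 > size()-1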
@@ -223,6 +224,9 @@ public Tensor zeroCopy() {
/**
* Creates a tensor of the same class and all elements set to zero, but size and
* dimension names are obtained from a prototype tensor.
*
* @param prototype The tensor whose size and dimension name is used.
* @return A tensor with the same size as the prototype.
*/
public Tensor zeroCopy(Tensor prototype) {
return zeroCopy(prototype.size()).setDimensionName(prototype.getDimensionName());
@@ -238,7 +242,7 @@ public Tensor zeroCopy(Tensor prototype) {
* @return <code>this</code> Tensor instance.
*/
public Tensor setDimensionName(Tensor other) {
//assertMatching(other);
// assertMatching(other);
if (other.getDimensionName() != null)
dimensionName = other.getDimensionName();
return this;
@@ -1,4 +1,4 @@
-package mklab.JGNN.core.empy;
+package mklab.JGNN.core.empty;

import java.util.ArrayList;
import java.util.Iterator;
@@ -7,7 +7,25 @@
import mklab.JGNN.core.Matrix;
import mklab.JGNN.core.Tensor;

/**
* A {@link Matrix} without data that contains only the correct dimension names
* and sizes. All its data are considered zero. Empty data types try to
* pervasively fill all operation outcomes in which they are involved. The
* intent is to use them during
* {@link mklab.JGNN.adhoc.ModelBuilder#autosize(java.util.List)} to keep that
* process lightweight.
*
* @author Emmanouil Krasanakis
* @see EmptyTensor
*/
public class EmptyMatrix extends Matrix {
/**
* Initializes an {@link EmptyMatrix} of given dimensions. It does not allocate
* memory for data.
*
* @param rows The number of matrix rows.
* @param cols The number of matrix columns.
*/
public EmptyMatrix(long rows, long cols) {
super(rows, cols);
}
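A usage sketch (not part of this commit; the dimension variables are illustrative):

    Matrix placeholder = new EmptyMatrix(numNodes, numFeatures); // records sizes and dimension names only, holds no data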
@@ -1,26 +1,51 @@
-package mklab.JGNN.core.empy;
+package mklab.JGNN.core.empty;

import java.util.ArrayList;
import java.util.Iterator;

import mklab.JGNN.core.Tensor;

/**
* A {@link Tensor} without data that contains only the correct dimension names
* and sizes. All its data are considered zero. Empty data types try to
* pervasively fill all operation outcomes in which they are involved. The
* intent is to use them during
* {@link mklab.JGNN.adhoc.ModelBuilder#autosize(java.util.List)} to keep that
* process lightweight.
*
* @author Emmanouil Krasanakis
* @see EmptyMatrix
*/
public class EmptyTensor extends Tensor {
/**
* Initializes an {@link EmptyTensor} of zero size.
*/
public EmptyTensor() {
super(0);
}

/**
* Initializes an {@link EmptyTensor} of the given size. It does not allocate
* memory for data.
*
* @param size The tensor size.
*/
public EmptyTensor(long size) {
super(size);
}

@Override
protected void allocate(long size) {
}

@Override
public void release() {
}

@Override
public void persist() {
}

@Override
public Tensor put(long pos, double value) {
return this;
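A sketch of the intended autosize usage from the class javadoc above (not part of this commit; the model builder variable and sample count are illustrative):

    import java.util.Arrays;
    import java.util.List;

    List<Tensor> datalessInputs = Arrays.asList(new EmptyTensor(numSamples));
    modelBuilder.autosize(datalessInputs); // infers parameter dimensions without allocating data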
@@ -8,4 +8,4 @@
*
* @author Emmanouil Krasanakis
*/
-package mklab.JGNN.core.empy;
+package mklab.JGNN.core.empty;