Skip to content

Commit

Permalink
Add VNN Operations.
Browse files Browse the repository at this point in the history
  • Loading branch information
ameritusweb committed Mar 31, 2024
1 parent 341433a commit 4873db6
Show file tree
Hide file tree
Showing 15 changed files with 2,297 additions and 3 deletions.
24 changes: 24 additions & 0 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -199,6 +199,30 @@ DeepScaleAndShiftOperation

FlattenOperation

### Vector Neural Network (VNN) Operations
These operations typically act on instances of the Matrix class in which the left half of the columns holds magnitudes and the right half holds the corresponding angles in radians.
Learn more about Vector Neural Networks [here](https://www.amazon.com/Vector-Neural-Networks-Geometric-Tensors-ebook/dp/B0CXBV3DY5/ref=sr_1_1).

ElementwiseSquareOperation

ElementwiseVectorAddOperation

ElementwiseVectorCartesianSummationOperation

ElementwiseVectorConstituentMultiplyOperation

ElementwiseVectorDecompositionOperation

ElementwiseVectorMiniDecompositionOperation

PairwiseSineSoftmaxOperation

VectorAttentionBinaryOperation

VectorAttentionOperation

VectorizeOperation

### Neural Network Parameters

Each neural network base class has a set of parameters that can be used to configure the neural network. They are as follows:
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -95,6 +95,5 @@ public override BackwardResult Backward(Matrix dOutput)
.AddInputGradient(dProbabilities)
.Build();
}

}
}
Binary file not shown.
4 changes: 2 additions & 2 deletions src/ParallelReverseAutoDiff.nuspec
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@
<package xmlns="http://schemas.microsoft.com/packaging/2013/05/nuspec.xsd">
<metadata>
<id>ParallelReverseAutoDiff</id>
<version>1.1.65</version>
<version>1.2.0</version>
<authors>ameritusweb</authors>
<owners>ameritusweb</owners>
<license type="expression">LGPL-2.1-only</license>
Expand All @@ -11,7 +11,7 @@
<requireLicenseAcceptance>false</requireLicenseAcceptance>
<description>A library for parallelized reverse mode automatic differentiation in C# for custom neural network development.</description>
<repository type="git" url="https://github.com/ameritusweb/ParallelReverseAutoDiff.git" commit="0a9bbd18f45c4f4434160a7c064539f29f3a3c67" />
<releaseNotes>Fix GPU Matrix Multiply.</releaseNotes>
<releaseNotes>Add VNN Operations.</releaseNotes>
<copyright>ameritusweb, 2024</copyright>
<tags>autodiff automatic-differentiation parallel reverse-mode differentiation C# neural network</tags>
<dependencies>
Expand Down
85 changes: 85 additions & 0 deletions src/RMAD/ElementwiseSquareOperation.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,85 @@
//------------------------------------------------------------------------------
// <copyright file="ElementwiseSquareOperation.cs" author="ameritusweb" date="5/2/2023">
// Copyright (c) 2023 ameritusweb All rights reserved.
// </copyright>
//------------------------------------------------------------------------------
namespace ParallelReverseAutoDiff.RMAD
{
    using System;

    /// <summary>
    /// Performs the forward and backward operations for the element-wise square function.
    /// </summary>
    public class ElementwiseSquareOperation : Operation
    {
        // Forward input, cached so Backward can form the local derivative 2x.
        private Matrix input;

        /// <summary>
        /// A common factory method for instantiating this operation.
        /// </summary>
        /// <param name="net">The neural network.</param>
        /// <returns>The instantiated operation.</returns>
        public static IOperation Instantiate(NeuralNetwork net)
        {
            return new ElementwiseSquareOperation();
        }

        /// <inheritdoc />
        public override void Store(Guid id)
        {
            this.IntermediateMatrices.AddOrUpdate(id, this.input, (key, existing) => this.input);
        }

        /// <inheritdoc />
        public override void Restore(Guid id)
        {
            this.input = this.IntermediateMatrices[id];
        }

        /// <summary>
        /// Performs the forward operation for the element-wise square function.
        /// </summary>
        /// <param name="input">The input to the element-wise square operation.</param>
        /// <returns>The output of the element-wise square operation.</returns>
        public Matrix Forward(Matrix input)
        {
            this.input = input;
            int numRows = input.Length;
            int numCols = input[0].Length;
            this.Output = new Matrix(numRows, numCols);

            for (int row = 0; row < numRows; row++)
            {
                for (int col = 0; col < numCols; col++)
                {
                    double value = input[row][col];
                    this.Output[row][col] = value * value;
                }
            }

            return this.Output;
        }

        /// <inheritdoc />
        public override BackwardResult Backward(Matrix dLdOutput)
        {
            int numRows = dLdOutput.Length;
            int numCols = dLdOutput[0].Length;
            Matrix dLdInput = new Matrix(numRows, numCols);

            for (int row = 0; row < numRows; row++)
            {
                for (int col = 0; col < numCols; col++)
                {
                    // d(x^2)/dx = 2x, chained with the upstream gradient.
                    dLdInput[row][col] = dLdOutput[row][col] * (2 * this.input[row][col]);
                }
            }

            return new BackwardResultBuilder()
                .AddInputGradient(dLdInput)
                .Build();
        }
    }
}
122 changes: 122 additions & 0 deletions src/RMAD/ElementwiseVectorAddOperation.cs
Original file line number Diff line number Diff line change
@@ -0,0 +1,122 @@
//------------------------------------------------------------------------------
// <copyright file="ElementwiseVectorAddOperation.cs" author="ameritusweb" date="5/2/2023">
// Copyright (c) 2023 ameritusweb All rights reserved.
// </copyright>
//------------------------------------------------------------------------------
namespace ParallelReverseAutoDiff.RMAD
{
    using System;
    using System.Threading.Tasks;

    /// <summary>
    /// Element-wise add operation. Operates on matrices whose left half of
    /// columns holds vector magnitudes and whose right half holds the
    /// corresponding angles in radians.
    /// </summary>
    public class ElementwiseVectorAddOperation : Operation
    {
        // Forward inputs, cached for use in Backward.
        private Matrix input1;
        private Matrix input2;

        /// <summary>
        /// A common method for instantiating an operation.
        /// </summary>
        /// <param name="net">The neural network.</param>
        /// <returns>The instantiated operation.</returns>
        public static IOperation Instantiate(NeuralNetwork net)
        {
            return new ElementwiseVectorAddOperation();
        }

        /// <summary>
        /// Performs the forward operation for the element-wise vector summation function.
        /// Each (magnitude, angle) pair is converted to Cartesian coordinates, the two
        /// vectors are added, and the sum is converted back to polar form.
        /// </summary>
        /// <param name="input1">The first input to the element-wise vector summation operation.</param>
        /// <param name="input2">The second input to the element-wise vector summation operation.</param>
        /// <returns>The output of the element-wise vector summation operation.</returns>
        public Matrix Forward(Matrix input1, Matrix input2)
        {
            this.input1 = input1;
            this.input2 = input2;

            this.Output = new Matrix(this.input1.Rows, this.input1.Cols);
            Parallel.For(0, input1.Rows, i =>
            {
                int half = input1.Cols / 2;
                for (int j = 0; j < half; j++)
                {
                    // Magnitudes live in the left half of the matrix, angles in the right half.
                    double magnitude = input1[i, j];
                    double angle = input1[i, j + half];
                    double wMagnitude = input2[i, j];
                    double wAngle = input2[i, j + half];

                    // Polar -> Cartesian.
                    double x1 = magnitude * Math.Cos(angle);
                    double y1 = magnitude * Math.Sin(angle);
                    double x2 = wMagnitude * Math.Cos(wAngle);
                    double y2 = wMagnitude * Math.Sin(wAngle);

                    double sumX = x1 + x2;
                    double sumY = y1 + y2;

                    // Cartesian sum -> polar result.
                    this.Output[i, j] = Math.Sqrt((sumX * sumX) + (sumY * sumY));
                    this.Output[i, j + half] = Math.Atan2(sumY, sumX);
                }
            });

            return this.Output;
        }

        /// <inheritdoc />
        public override BackwardResult Backward(Matrix dOutput)
        {
            Matrix dInput1 = new Matrix(this.input1.Rows, this.input1.Cols);
            Matrix dInput2 = new Matrix(this.input2.Rows, this.input2.Cols);

            Parallel.For(0, this.input1.Rows, i =>
            {
                int half = this.input1.Cols / 2;
                for (int j = 0; j < half; j++)
                {
                    double magnitude = this.input1[i, j];
                    double angle = this.input1[i, j + half];
                    double wMagnitude = this.input2[i, j];
                    double wAngle = this.input2[i, j + half];

                    double sumX = (magnitude * Math.Cos(angle)) + (wMagnitude * Math.Cos(wAngle));
                    double sumY = (magnitude * Math.Sin(angle)) + (wMagnitude * Math.Sin(wAngle));

                    double resultMagnitude = this.Output[i, j];
                    double magSquared = (sumX * sumX) + (sumY * sumY);

                    // Partials of the polar outputs w.r.t. the Cartesian sums:
                    //   dR/dx = x/R, dR/dy = y/R, dTheta/dx = -y/(x^2+y^2), dTheta/dy = x/(x^2+y^2).
                    // Guard the degenerate zero-vector case (the polar form is not
                    // differentiable there); propagate zero gradient instead of NaN.
                    double dRdX = resultMagnitude > 0d ? sumX / resultMagnitude : 0d;
                    double dRdY = resultMagnitude > 0d ? sumY / resultMagnitude : 0d;
                    double dTdX = magSquared > 0d ? -sumY / magSquared : 0d;
                    double dTdY = magSquared > 0d ? sumX / magSquared : 0d;

                    // Upstream gradients for the resultant magnitude and angle.
                    double dR = dOutput[i, j];
                    double dT = dOutput[i, j + half];

                    // Chain rule through the Cartesian sums. For each polar input:
                    //   dX/dm = cos(a),        dY/dm = sin(a),
                    //   dX/da = -m * sin(a),   dY/da =  m * cos(a).
                    // Each input gradient sums BOTH the x- and y-paths for BOTH outputs
                    // (the original code paired dR only with the x-path and dT only with
                    // the y-path, and dropped the magnitude factor in dX/da and dY/da).
                    dInput1[i, j] = (dR * ((dRdX * Math.Cos(angle)) + (dRdY * Math.Sin(angle)))) +
                                    (dT * ((dTdX * Math.Cos(angle)) + (dTdY * Math.Sin(angle))));
                    dInput1[i, j + half] = (dR * ((dRdX * -magnitude * Math.Sin(angle)) + (dRdY * magnitude * Math.Cos(angle)))) +
                                           (dT * ((dTdX * -magnitude * Math.Sin(angle)) + (dTdY * magnitude * Math.Cos(angle))));
                    dInput2[i, j] = (dR * ((dRdX * Math.Cos(wAngle)) + (dRdY * Math.Sin(wAngle)))) +
                                    (dT * ((dTdX * Math.Cos(wAngle)) + (dTdY * Math.Sin(wAngle))));
                    dInput2[i, j + half] = (dR * ((dRdX * -wMagnitude * Math.Sin(wAngle)) + (dRdY * wMagnitude * Math.Cos(wAngle)))) +
                                           (dT * ((dTdX * -wMagnitude * Math.Sin(wAngle)) + (dTdY * wMagnitude * Math.Cos(wAngle))));
                }
            });

            return new BackwardResultBuilder()
                .AddInputGradient(dInput1)
                .AddInputGradient(dInput2)
                .Build();
        }
    }
}
Loading

0 comments on commit 4873db6

Please sign in to comment.