-
Notifications
You must be signed in to change notification settings - Fork 23
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
Merge pull request #14 from MengyangHe1/v3
merge gitlab/akita/dnn to this repo
- Loading branch information
Showing
112 changed files
with
8,469 additions
and
2,066 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,6 +1,6 @@ | ||
name: MGPUSim Test | ||
|
||
on: push | ||
on: [push, pull_request] | ||
|
||
jobs: | ||
compile: | ||
|
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,2 @@ | ||
lenet | ||
lenet.exe |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,86 @@ | ||
package main | ||
|
||
import ( | ||
"flag" | ||
"math" | ||
"math/rand" | ||
|
||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/tensor" | ||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/training/optimization" | ||
|
||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/dataset/mnist" | ||
|
||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/layers" | ||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/training" | ||
) | ||
|
||
func main() { | ||
flag.Parse() | ||
rand.Seed(1) | ||
|
||
to := &tensor.CPUOperator{} | ||
|
||
network := defineNetwork(to) | ||
trainer := training.Trainer{ | ||
TO: to, | ||
DataSource: mnist.NewTrainingDataSource(to), | ||
Network: network, | ||
LossFunc: training.NewSoftmaxCrossEntropy(to), | ||
// OptimizationAlg: optimization.NewSGD(to, 0.001), | ||
//OptimizationAlg: optimization.NewMomentum(0.1, 0.9), | ||
//OptimizationAlg: optimization.NewRMSProp(0.003), | ||
OptimizationAlg: optimization.NewAdam(to, 0.01), | ||
Tester: &training.Tester{ | ||
DataSource: mnist.NewTestDataSource(to), | ||
Network: network, | ||
BatchSize: math.MaxInt32, | ||
}, | ||
Epoch: 1000, | ||
BatchSize: 128, | ||
ShowBatchInfo: true, | ||
} | ||
|
||
for _, l := range network.Layers { | ||
l.Randomize() | ||
} | ||
|
||
trainer.Train() | ||
} | ||
|
||
func defineNetwork(to *tensor.CPUOperator) training.Network { | ||
network := training.Network{ | ||
Layers: []layers.Layer{ | ||
layers.NewConv2D( | ||
0, | ||
to, | ||
[]int{1, 28, 28}, | ||
[]int{6, 1, 5, 5}, | ||
[]int{1, 1}, | ||
[]int{2, 2}), | ||
layers.NewReluLayer(to), | ||
layers.NewAvgPoolingLayer( | ||
to, | ||
[]int{2, 2}, | ||
[]int{0, 0}, | ||
[]int{2, 2}), | ||
layers.NewConv2D( | ||
3, | ||
to, | ||
[]int{6, 14, 14}, | ||
[]int{16, 6, 5, 5}, | ||
[]int{1, 1}, | ||
[]int{0, 0}), | ||
layers.NewReluLayer(to), | ||
layers.NewAvgPoolingLayer(to, | ||
[]int{2, 2}, | ||
[]int{0, 0}, | ||
[]int{2, 2}), | ||
layers.NewFullyConnectedLayer(6, to, 400, 120), | ||
layers.NewReluLayer(to), | ||
layers.NewFullyConnectedLayer(8, to, 120, 84), | ||
layers.NewReluLayer(to), | ||
layers.NewFullyConnectedLayer(10, to, 84, 10), | ||
}, | ||
} | ||
return network | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,2 @@ | ||
minerva | ||
minerva.exe |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,57 @@ | ||
package main | ||
|
||
import ( | ||
"flag" | ||
"math" | ||
"math/rand" | ||
|
||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/tensor" | ||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/training/optimization" | ||
|
||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/dataset/mnist" | ||
|
||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/layers" | ||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/training" | ||
) | ||
|
||
func main() { | ||
flag.Parse() | ||
rand.Seed(1) | ||
|
||
to := &tensor.CPUOperator{} | ||
|
||
network := training.Network{ | ||
Layers: []layers.Layer{ | ||
layers.NewFullyConnectedLayer(0, to, 784, 256), | ||
layers.NewReluLayer(to), | ||
layers.NewFullyConnectedLayer(2, to, 256, 100), | ||
layers.NewReluLayer(to), | ||
layers.NewFullyConnectedLayer(4, to, 100, 100), | ||
layers.NewReluLayer(to), | ||
layers.NewFullyConnectedLayer(6, to, 100, 10), | ||
}, | ||
} | ||
trainer := training.Trainer{ | ||
DataSource: mnist.NewTrainingDataSource(to), | ||
Network: network, | ||
LossFunc: training.NewSoftmaxCrossEntropy(to), | ||
//OptimizationAlg: optimization.NewSGD(0.03), | ||
//OptimizationAlg: optimization.NewMomentum(0.1, 0.9), | ||
//OptimizationAlg: optimization.NewRMSProp(0.003), | ||
OptimizationAlg: optimization.NewAdam(to, 0.001), | ||
Tester: &training.Tester{ | ||
DataSource: mnist.NewTestDataSource(to), | ||
Network: network, | ||
BatchSize: math.MaxInt32, | ||
}, | ||
Epoch: 1000, | ||
BatchSize: 128, | ||
ShowBatchInfo: true, | ||
} | ||
|
||
for _, l := range network.Layers { | ||
l.Randomize() | ||
} | ||
|
||
trainer.Train() | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,2 @@ | ||
vgg16 | ||
vgg16.exe |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,92 @@ | ||
package main | ||
|
||
import ( | ||
"flag" | ||
"math" | ||
"math/rand" | ||
|
||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/tensor" | ||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/training/optimization" | ||
|
||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/dataset/imagenet" | ||
|
||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/layers" | ||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/training" | ||
) | ||
|
||
func main() { | ||
flag.Parse() | ||
rand.Seed(1) | ||
|
||
to := &tensor.CPUOperator{} | ||
|
||
network := getNetwork(to) | ||
trainer := training.Trainer{ | ||
DataSource: imagenet.NewTrainingDataSource(to), | ||
Network: network, | ||
LossFunc: training.NewSoftmaxCrossEntropy(to), | ||
OptimizationAlg: optimization.NewAdam(to, 0.001), | ||
Tester: &training.Tester{ | ||
DataSource: imagenet.NewTestDataSource(to), | ||
Network: network, | ||
BatchSize: math.MaxInt32, | ||
}, | ||
Epoch: 1000, | ||
BatchSize: 128, | ||
ShowBatchInfo: true, | ||
} | ||
|
||
for _, l := range network.Layers { | ||
l.Randomize() | ||
} | ||
|
||
trainer.Train() | ||
} | ||
|
||
func getNetwork(to tensor.Operator) training.Network { | ||
return training.Network{ | ||
Layers: []layers.Layer{ | ||
layers.NewConv2D(0, to, []int{3, 224, 224}, []int{64, 3, 3, 3}, []int{1, 1}, []int{1, 1}), | ||
layers.NewReluLayer(to), | ||
layers.NewConv2D(2, to, []int{64, 224, 224}, []int{64, 64, 3, 3}, []int{1, 1}, []int{1, 1}), | ||
layers.NewReluLayer(to), | ||
layers.NewMaxPoolingLayer(to, []int{2, 2}, []int{0, 0}, []int{2, 2}), | ||
|
||
layers.NewConv2D(5, to, []int{64, 112, 112}, []int{128, 64, 3, 3}, []int{1, 1}, []int{1, 1}), | ||
layers.NewReluLayer(to), | ||
layers.NewConv2D(7, to, []int{128, 112, 112}, []int{128, 128, 3, 3}, []int{1, 1}, []int{1, 1}), | ||
layers.NewReluLayer(to), | ||
layers.NewConv2D(9, to, []int{128, 112, 112}, []int{128, 128, 3, 3}, []int{1, 1}, []int{1, 1}), | ||
layers.NewReluLayer(to), | ||
layers.NewMaxPoolingLayer(to, []int{2, 2}, []int{0, 0}, []int{2, 2}), | ||
|
||
layers.NewConv2D(12, to, []int{128, 56, 56}, []int{256, 128, 3, 3}, []int{1, 1}, []int{1, 1}), | ||
layers.NewReluLayer(to), | ||
layers.NewConv2D(14, to, []int{256, 56, 56}, []int{256, 256, 3, 3}, []int{1, 1}, []int{1, 1}), | ||
layers.NewReluLayer(to), | ||
layers.NewConv2D(16, to, []int{256, 56, 56}, []int{256, 256, 3, 3}, []int{1, 1}, []int{1, 1}), | ||
layers.NewReluLayer(to), | ||
layers.NewMaxPoolingLayer(to, []int{2, 2}, []int{0, 0}, []int{2, 2}), | ||
|
||
layers.NewConv2D(17, to, []int{256, 28, 28}, []int{512, 256, 3, 3}, []int{1, 1}, []int{1, 1}), | ||
layers.NewReluLayer(to), | ||
layers.NewConv2D(19, to, []int{512, 28, 28}, []int{512, 512, 3, 3}, []int{1, 1}, []int{1, 1}), | ||
layers.NewReluLayer(to), | ||
layers.NewConv2D(21, to, []int{512, 28, 28}, []int{512, 512, 3, 3}, []int{1, 1}, []int{1, 1}), | ||
layers.NewReluLayer(to), | ||
layers.NewMaxPoolingLayer(to, []int{2, 2}, []int{0, 0}, []int{2, 2}), | ||
|
||
layers.NewConv2D(24, to, []int{512, 14, 14}, []int{512, 512, 3, 3}, []int{1, 1}, []int{1, 1}), | ||
layers.NewReluLayer(to), | ||
layers.NewConv2D(26, to, []int{512, 14, 14}, []int{512, 512, 3, 3}, []int{1, 1}, []int{1, 1}), | ||
layers.NewReluLayer(to), | ||
layers.NewConv2D(27, to, []int{512, 14, 14}, []int{512, 512, 3, 3}, []int{1, 1}, []int{1, 1}), | ||
layers.NewReluLayer(to), | ||
layers.NewMaxPoolingLayer(to, []int{2, 2}, []int{0, 0}, []int{2, 2}), | ||
|
||
layers.NewFullyConnectedLayer(30, to, 7*7*512, 2*2*512), | ||
layers.NewReluLayer(to), | ||
layers.NewFullyConnectedLayer(32, to, 2*2*512, 200), | ||
}, | ||
} | ||
} |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
xor |
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,59 @@ | ||
package main | ||
|
||
import ( | ||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/tensor" | ||
) | ||
|
||
// DataSource generates XOR training data: the four canonical samples
// (0,0), (0,1), (1,0), (1,1) with labels 0, 1, 1, 0 (populated by
// NewDataSource), served in batches via NextBatch.
type DataSource struct {
	to        tensor.Operator // operator used to create batch tensors
	allData   []float64       // flattened samples, imageSize values per sample
	allLabel  []int           // one label per sample
	imageSize int             // values per sample (2 for XOR)
	currPtr   int             // index of the next unread sample
}
|
||
// NewDataSource creates a new XOR DataSource | ||
func NewDataSource(to tensor.Operator) *DataSource { | ||
ds := &DataSource{ | ||
to: to, | ||
imageSize: 2, | ||
} | ||
ds.allData = []float64{ | ||
0, 0, | ||
0, 1, | ||
1, 0, | ||
1, 1, | ||
} | ||
ds.allLabel = []int{ | ||
0, 1, 1, 0, | ||
} | ||
return ds | ||
} | ||
|
||
// NextBatch returns the next batch of data. | ||
func (ds *DataSource) NextBatch(batchSize int) ( | ||
data tensor.Tensor, | ||
label []int, | ||
) { | ||
start := ds.currPtr | ||
end := start + batchSize | ||
|
||
if end > len(ds.allLabel) { | ||
end = len(ds.allLabel) | ||
} | ||
|
||
rawData := ds.allData[start*ds.imageSize : end*ds.imageSize] | ||
data = ds.to.CreateWithData(rawData, []int{end - start, ds.imageSize}, "") | ||
|
||
label = ds.allLabel[start:end] | ||
|
||
ds.currPtr = end | ||
|
||
return data, label | ||
} | ||
|
||
// Rewind sets the read pointer back to the beginning, so the next
// NextBatch call starts from the first sample again.
func (ds *DataSource) Rewind() {
	ds.currPtr = 0
}
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,39 @@ | ||
package main | ||
|
||
import ( | ||
"math/rand" | ||
|
||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/tensor" | ||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/training/optimization" | ||
|
||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/layers" | ||
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/training" | ||
) | ||
|
||
func main() { | ||
rand.Seed(1) | ||
to := tensor.CPUOperator{} | ||
|
||
network := training.Network{ | ||
Layers: []layers.Layer{ | ||
layers.NewFullyConnectedLayer(0, to, 2, 4), | ||
layers.NewReluLayer(to), | ||
layers.NewFullyConnectedLayer(2, to, 4, 2), | ||
}, | ||
} | ||
trainer := training.Trainer{ | ||
DataSource: NewDataSource(to), | ||
Network: network, | ||
LossFunc: training.NewSoftmaxCrossEntropy(to), | ||
OptimizationAlg: optimization.NewSGD(to, 0.03), | ||
Epoch: 50, | ||
BatchSize: 4, | ||
ShowBatchInfo: true, | ||
} | ||
|
||
for _, l := range network.Layers { | ||
l.Randomize() | ||
} | ||
|
||
trainer.Train() | ||
} |
Oops, something went wrong.