Commit

Merge pull request #14 from MengyangHe1/v3
merge gitlab/akita/dnn to this repo
syifan authored Sep 29, 2023
2 parents 5199aab + b3ef371 commit b851ee8
Showing 112 changed files with 8,469 additions and 2,066 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/mgpusim_test.yml
@@ -1,6 +1,6 @@
name: MGPUSim Test

-on: push
+on: [push, pull_request]

jobs:
compile:
2 changes: 1 addition & 1 deletion README.md
@@ -48,7 +48,7 @@ You can run a simulation with the `--report-all` argument to enable all the perf
## How to Prepare Your Own Experiment

- Create a new repository. Typically, we create one repo for each project, which may contain multiple experiments.
-- Create a folder in your repo for each experiment. Run `go mod init [git repo path]/[directory_name]` to initialize the folder as a new Go module. For example, if your git repository is hosted at `https://gitlab.com/syifan/fancy_project` and your experiment folder is named `exp1`, your module path should be `gitlab.com/syifan/fancy_project/exp1`.
+- Create a folder in your repo for each experiment. Run `go mod init [git repo path]/[directory_name]` to initialize the folder as a new Go module. For example, if your git repository is hosted at `https://github.com/syifan/fancy_project` and your experiment folder is named `exp1`, your module path should be `github.com/syifan/fancy_project/exp1`.
- Copy all the files under the directory `samples/experiment` to your experiment folder. In the `main.go` file, change the benchmark and the problem size to run, or use an argument to select which benchmark to run (see the sketch after this list). The files `runner.go`, `platform.go`, `r9nano.go`, and `shaderarray.go` serve as configuration files, so change them according to your needs.
- It is also possible to modify an existing component or to add a new one. First, copy the folder that contains the component you want to modify into your repo. Then, modify the configuration scripts to link the system with your new component. Add some print statements to check that your local component is used. Finally, start modifying the component code.
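
For reference, here is a minimal sketch of such a `main.go`, modeled on the FIR sample in this repository's `samples` directory. The `samples/runner` import path, the `ParseFlag().Init()` chain, and the `Length` field are assumptions drawn from that sample and may differ in your copied files:

package main

import (
	"flag"

	"github.com/sarchlab/mgpusim/v3/benchmarks/heteromark/fir"
	"github.com/sarchlab/mgpusim/v3/samples/runner"
)

// length sets the problem size; override it with -length on the command line.
var length = flag.Int("length", 4096, "number of elements to filter")

func main() {
	flag.Parse()

	// runner.go, platform.go, r9nano.go, and shaderarray.go configure the
	// platform that this call builds.
	r := new(runner.Runner).ParseFlag().Init()

	// Swap in a different benchmark here, or pick one based on a flag value.
	b := fir.NewBenchmark(r.Driver())
	b.Length = *length

	r.AddBenchmark(b)
	r.Run()
}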

2 changes: 2 additions & 0 deletions benchmarks/dnn/cpu_only_examples/lenet/.gitignore
@@ -0,0 +1,2 @@
lenet
lenet.exe
86 changes: 86 additions & 0 deletions benchmarks/dnn/cpu_only_examples/lenet/main.go
@@ -0,0 +1,86 @@
package main

import (
"flag"
"math"
"math/rand"

"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/tensor"
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/training/optimization"

"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/dataset/mnist"

"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/layers"
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/training"
)

func main() {
flag.Parse()
rand.Seed(1)

to := &tensor.CPUOperator{}

network := defineNetwork(to)
trainer := training.Trainer{
TO: to,
DataSource: mnist.NewTrainingDataSource(to),
Network: network,
LossFunc: training.NewSoftmaxCrossEntropy(to),
// OptimizationAlg: optimization.NewSGD(to, 0.001),
//OptimizationAlg: optimization.NewMomentum(0.1, 0.9),
//OptimizationAlg: optimization.NewRMSProp(0.003),
OptimizationAlg: optimization.NewAdam(to, 0.01),
Tester: &training.Tester{
DataSource: mnist.NewTestDataSource(to),
Network: network,
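// math.MaxInt32 requests the whole test set as a single batch; the data
// source is expected to clamp the batch to the dataset size.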
BatchSize: math.MaxInt32,
},
Epoch: 1000,
BatchSize: 128,
ShowBatchInfo: true,
}

for _, l := range network.Layers {
l.Randomize()
}

trainer.Train()
}

func defineNetwork(to *tensor.CPUOperator) training.Network {
network := training.Network{
Layers: []layers.Layer{
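// NewConv2D arguments (inferred from the shapes used below): layer index,
// operator, input size {C, H, W}, kernel size {Cout, Cin, KH, KW}, stride,
// and padding.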
layers.NewConv2D(
0,
to,
[]int{1, 28, 28},
[]int{6, 1, 5, 5},
[]int{1, 1},
[]int{2, 2}),
layers.NewReluLayer(to),
layers.NewAvgPoolingLayer(
to,
[]int{2, 2},
[]int{0, 0},
[]int{2, 2}),
layers.NewConv2D(
3,
to,
[]int{6, 14, 14},
[]int{16, 6, 5, 5},
[]int{1, 1},
[]int{0, 0}),
layers.NewReluLayer(to),
layers.NewAvgPoolingLayer(to,
[]int{2, 2},
[]int{0, 0},
[]int{2, 2}),
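// The second pooling leaves 16 feature maps of 5x5, so the flattened
// fully connected input is 16*5*5 = 400.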
layers.NewFullyConnectedLayer(6, to, 400, 120),
layers.NewReluLayer(to),
layers.NewFullyConnectedLayer(8, to, 120, 84),
layers.NewReluLayer(to),
layers.NewFullyConnectedLayer(10, to, 84, 10),
},
}
return network
}
2 changes: 2 additions & 0 deletions benchmarks/dnn/cpu_only_examples/minerva/.gitignore
@@ -0,0 +1,2 @@
minerva
minerva.exe
57 changes: 57 additions & 0 deletions benchmarks/dnn/cpu_only_examples/minerva/main.go
@@ -0,0 +1,57 @@
package main

import (
"flag"
"math"
"math/rand"

"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/tensor"
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/training/optimization"

"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/dataset/mnist"

"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/layers"
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/training"
)

func main() {
flag.Parse()
rand.Seed(1)

to := &tensor.CPUOperator{}

network := training.Network{
Layers: []layers.Layer{
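// MNIST images are 28x28 = 784 pixels, flattened; the layers narrow
// 784 -> 256 -> 100 -> 100 -> 10 class scores.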
layers.NewFullyConnectedLayer(0, to, 784, 256),
layers.NewReluLayer(to),
layers.NewFullyConnectedLayer(2, to, 256, 100),
layers.NewReluLayer(to),
layers.NewFullyConnectedLayer(4, to, 100, 100),
layers.NewReluLayer(to),
layers.NewFullyConnectedLayer(6, to, 100, 10),
},
}
trainer := training.Trainer{
DataSource: mnist.NewTrainingDataSource(to),
Network: network,
LossFunc: training.NewSoftmaxCrossEntropy(to),
//OptimizationAlg: optimization.NewSGD(to, 0.03),
//OptimizationAlg: optimization.NewMomentum(0.1, 0.9),
//OptimizationAlg: optimization.NewRMSProp(0.003),
OptimizationAlg: optimization.NewAdam(to, 0.001),
Tester: &training.Tester{
DataSource: mnist.NewTestDataSource(to),
Network: network,
BatchSize: math.MaxInt32,
},
Epoch: 1000,
BatchSize: 128,
ShowBatchInfo: true,
}

for _, l := range network.Layers {
l.Randomize()
}

trainer.Train()
}
2 changes: 2 additions & 0 deletions benchmarks/dnn/cpu_only_examples/vgg16/.gitignore
@@ -0,0 +1,2 @@
vgg16
vgg16.exe
92 changes: 92 additions & 0 deletions benchmarks/dnn/cpu_only_examples/vgg16/main.go
@@ -0,0 +1,92 @@
package main

import (
"flag"
"math"
"math/rand"

"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/tensor"
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/training/optimization"

"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/dataset/imagenet"

"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/layers"
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/training"
)

func main() {
flag.Parse()
rand.Seed(1)

to := &tensor.CPUOperator{}

network := getNetwork(to)
trainer := training.Trainer{
DataSource: imagenet.NewTrainingDataSource(to),
Network: network,
LossFunc: training.NewSoftmaxCrossEntropy(to),
OptimizationAlg: optimization.NewAdam(to, 0.001),
Tester: &training.Tester{
DataSource: imagenet.NewTestDataSource(to),
Network: network,
BatchSize: math.MaxInt32,
},
Epoch: 1000,
BatchSize: 128,
ShowBatchInfo: true,
}

for _, l := range network.Layers {
l.Randomize()
}

trainer.Train()
}

func getNetwork(to tensor.Operator) training.Network {
return training.Network{
Layers: []layers.Layer{
layers.NewConv2D(0, to, []int{3, 224, 224}, []int{64, 3, 3, 3}, []int{1, 1}, []int{1, 1}),
layers.NewReluLayer(to),
layers.NewConv2D(2, to, []int{64, 224, 224}, []int{64, 64, 3, 3}, []int{1, 1}, []int{1, 1}),
layers.NewReluLayer(to),
layers.NewMaxPoolingLayer(to, []int{2, 2}, []int{0, 0}, []int{2, 2}),

layers.NewConv2D(5, to, []int{64, 112, 112}, []int{128, 64, 3, 3}, []int{1, 1}, []int{1, 1}),
layers.NewReluLayer(to),
layers.NewConv2D(7, to, []int{128, 112, 112}, []int{128, 128, 3, 3}, []int{1, 1}, []int{1, 1}),
layers.NewReluLayer(to),
layers.NewConv2D(9, to, []int{128, 112, 112}, []int{128, 128, 3, 3}, []int{1, 1}, []int{1, 1}),
layers.NewReluLayer(to),
layers.NewMaxPoolingLayer(to, []int{2, 2}, []int{0, 0}, []int{2, 2}),

layers.NewConv2D(12, to, []int{128, 56, 56}, []int{256, 128, 3, 3}, []int{1, 1}, []int{1, 1}),
layers.NewReluLayer(to),
layers.NewConv2D(14, to, []int{256, 56, 56}, []int{256, 256, 3, 3}, []int{1, 1}, []int{1, 1}),
layers.NewReluLayer(to),
layers.NewConv2D(16, to, []int{256, 56, 56}, []int{256, 256, 3, 3}, []int{1, 1}, []int{1, 1}),
layers.NewReluLayer(to),
layers.NewMaxPoolingLayer(to, []int{2, 2}, []int{0, 0}, []int{2, 2}),

layers.NewConv2D(17, to, []int{256, 28, 28}, []int{512, 256, 3, 3}, []int{1, 1}, []int{1, 1}),
layers.NewReluLayer(to),
layers.NewConv2D(19, to, []int{512, 28, 28}, []int{512, 512, 3, 3}, []int{1, 1}, []int{1, 1}),
layers.NewReluLayer(to),
layers.NewConv2D(21, to, []int{512, 28, 28}, []int{512, 512, 3, 3}, []int{1, 1}, []int{1, 1}),
layers.NewReluLayer(to),
layers.NewMaxPoolingLayer(to, []int{2, 2}, []int{0, 0}, []int{2, 2}),

layers.NewConv2D(24, to, []int{512, 14, 14}, []int{512, 512, 3, 3}, []int{1, 1}, []int{1, 1}),
layers.NewReluLayer(to),
layers.NewConv2D(26, to, []int{512, 14, 14}, []int{512, 512, 3, 3}, []int{1, 1}, []int{1, 1}),
layers.NewReluLayer(to),
layers.NewConv2D(27, to, []int{512, 14, 14}, []int{512, 512, 3, 3}, []int{1, 1}, []int{1, 1}),
layers.NewReluLayer(to),
layers.NewMaxPoolingLayer(to, []int{2, 2}, []int{0, 0}, []int{2, 2}),

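// Five 2x2, stride-2 poolings reduce the 224x224 input to 7x7, so the
// flattened fully connected input is 7*7*512 = 25088 features.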
layers.NewFullyConnectedLayer(30, to, 7*7*512, 2*2*512),
layers.NewReluLayer(to),
layers.NewFullyConnectedLayer(32, to, 2*2*512, 200),
},
}
}
1 change: 1 addition & 0 deletions benchmarks/dnn/cpu_only_examples/xor/.gitignore
@@ -0,0 +1 @@
xor
59 changes: 59 additions & 0 deletions benchmarks/dnn/cpu_only_examples/xor/datasource.go
@@ -0,0 +1,59 @@
package main

import (
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/tensor"
)

// DataSource generates XOR data.
type DataSource struct {
to tensor.Operator
allData []float64
allLabel []int
imageSize int
currPtr int
}

// NewDataSource creates a new XOR DataSource
func NewDataSource(to tensor.Operator) *DataSource {
ds := &DataSource{
to: to,
imageSize: 2,
}
ds.allData = []float64{
0, 0,
0, 1,
1, 0,
1, 1,
}
ds.allLabel = []int{
0, 1, 1, 0,
}
return ds
}

// NextBatch returns the next batch of data.
func (ds *DataSource) NextBatch(batchSize int) (
data tensor.Tensor,
label []int,
) {
start := ds.currPtr
end := start + batchSize

if end > len(ds.allLabel) {
end = len(ds.allLabel)
}

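// Each sample occupies imageSize consecutive values in the flat data slice.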
rawData := ds.allData[start*ds.imageSize : end*ds.imageSize]
data = ds.to.CreateWithData(rawData, []int{end - start, ds.imageSize}, "")

label = ds.allLabel[start:end]

ds.currPtr = end

return data, label
}

// Rewind sets the pointer back to the beginning of the dataset.
func (ds *DataSource) Rewind() {
ds.currPtr = 0
}
39 changes: 39 additions & 0 deletions benchmarks/dnn/cpu_only_examples/xor/xor.go
@@ -0,0 +1,39 @@
package main

import (
"math/rand"

"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/tensor"
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/training/optimization"

"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/layers"
"github.com/sarchlab/mgpusim/v3/benchmarks/dnn/training"
)

func main() {
rand.Seed(1)
to := &tensor.CPUOperator{}

network := training.Network{
Layers: []layers.Layer{
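// XOR is not linearly separable, so the network needs a hidden layer:
// 2 inputs -> 4 hidden units -> 2 class scores.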
layers.NewFullyConnectedLayer(0, to, 2, 4),
layers.NewReluLayer(to),
layers.NewFullyConnectedLayer(2, to, 4, 2),
},
}
trainer := training.Trainer{
DataSource: NewDataSource(to),
Network: network,
LossFunc: training.NewSoftmaxCrossEntropy(to),
OptimizationAlg: optimization.NewSGD(to, 0.03),
Epoch: 50,
BatchSize: 4,
ShowBatchInfo: true,
}

for _, l := range network.Layers {
l.Randomize()
}

trainer.Train()
}
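
Each of these CPU-only examples is a self-contained `main` package, so it can be built and run from its own directory with the standard Go toolchain (for example, `go run .`); the `.gitignore` entries above exclude the resulting binaries.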