Fix warnings with new compiler options
Enable CI on Windows and macOS
sbrunk committed Feb 5, 2024
1 parent 71751da commit 11eb92c
Showing 40 changed files with 158 additions and 208 deletions.
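For context, the warnings fixed in this commit come from stricter Scala 3 linting flags picked up by the build (see the -Wvalue-discard handling and tlJdkRelease change in build.sbt below). A rough sketch of the kind of sbt settings that surface the two warning classes addressed here, discarded non-Unit values and unused imports; the project's exact flag set may differ, since it inherits defaults from sbt-typelevel:

// build.sbt (illustrative sketch only, not this project's actual settings)
ThisBuild / scalacOptions ++= Seq(
  "-Wvalue-discard",  // warn when a non-Unit value is implicitly discarded
  "-Wunused:imports"  // warn on unused imports (most deletions in this diff)
)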
35 changes: 30 additions & 5 deletions .github/workflows/ci.yml
@@ -27,12 +27,17 @@ jobs:
     name: Build and Test
     strategy:
       matrix:
-        os: [ubuntu-latest]
+        os: [macos-latest, ubuntu-latest, windows-latest]
         scala: [3]
         java: [temurin@11]
     runs-on: ${{ matrix.os }}
     timeout-minutes: 60
     steps:
+      - name: Ignore line ending differences in git
+        if: contains(runner.os, 'windows')
+        shell: bash
+        run: git config --global core.autocrlf false
+
       - name: Checkout current branch (full)
         uses: actions/checkout@v4
         with:
@@ -49,32 +54,40 @@ jobs:
 
       - name: sbt update
         if: matrix.java == 'temurin@11' && steps.setup-java-temurin-11.outputs.cache-hit == 'false'
+        shell: bash
         run: sbt +update
 
       - name: Check that workflows are up to date
+        shell: bash
         run: sbt githubWorkflowCheck
 
       - name: Check headers and formatting
-        if: matrix.java == 'temurin@11' && matrix.os == 'ubuntu-latest'
+        if: matrix.java == 'temurin@11' && matrix.os == 'macos-latest'
+        shell: bash
         run: sbt '++ ${{ matrix.scala }}' headerCheckAll scalafmtCheckAll 'project /' scalafmtSbtCheck
 
       - name: Test
+        shell: bash
         run: sbt '++ ${{ matrix.scala }}' test
 
       - name: Check binary compatibility
-        if: matrix.java == 'temurin@11' && matrix.os == 'ubuntu-latest'
+        if: matrix.java == 'temurin@11' && matrix.os == 'macos-latest'
+        shell: bash
         run: sbt '++ ${{ matrix.scala }}' mimaReportBinaryIssues
 
       - name: Generate API documentation
-        if: matrix.java == 'temurin@11' && matrix.os == 'ubuntu-latest'
+        if: matrix.java == 'temurin@11' && matrix.os == 'macos-latest'
+        shell: bash
         run: sbt '++ ${{ matrix.scala }}' doc
 
       - name: Make target directories
         if: github.event_name != 'pull_request' && (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main')
+        shell: bash
         run: mkdir -p vision/target core/target project/target
 
       - name: Compress target directories
         if: github.event_name != 'pull_request' && (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main')
+        shell: bash
         run: tar cf targets.tar vision/target core/target project/target
 
       - name: Upload target directories
@@ -90,10 +103,14 @@ jobs:
     if: github.event_name != 'pull_request' && (startsWith(github.ref, 'refs/tags/v') || github.ref == 'refs/heads/main')
     strategy:
       matrix:
-        os: [ubuntu-latest]
+        os: [macos-latest]
         java: [temurin@11]
     runs-on: ${{ matrix.os }}
     steps:
+      - name: Ignore line ending differences in git
+        if: contains(runner.os, 'windows')
+        run: git config --global core.autocrlf false
+
       - name: Checkout current branch (full)
         uses: actions/checkout@v4
         with:
@@ -155,6 +172,10 @@ jobs:
         java: [temurin@11]
     runs-on: ${{ matrix.os }}
     steps:
+      - name: Ignore line ending differences in git
+        if: contains(runner.os, 'windows')
+        run: git config --global core.autocrlf false
+
       - name: Checkout current branch (full)
         uses: actions/checkout@v4
         with:
@@ -187,6 +208,10 @@ jobs:
         java: [temurin@11]
     runs-on: ${{ matrix.os }}
     steps:
+      - name: Ignore line ending differences in git
+        if: contains(runner.os, 'windows')
+        run: git config --global core.autocrlf false
+
       - name: Checkout current branch (full)
         uses: actions/checkout@v4
         with:
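Note that ci.yml is generated rather than hand-edited: the "Check that workflows are up to date" step runs sbt githubWorkflowCheck, which indicates sbt-github-actions (pulled in via sbt-typelevel) owns this file. The OS matrix above is therefore driven by the githubWorkflowOSes setting added to build.sbt below. A minimal sketch of that flow, assuming standard sbt-github-actions usage:

// build.sbt: declare the CI matrix in sbt...
ThisBuild / githubWorkflowOSes := Seq("macos-latest", "ubuntu-latest", "windows-latest")
// ...then regenerate .github/workflows/ci.yml from it:
//   sbt githubWorkflowGenerate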
10 changes: 8 additions & 2 deletions build.sbt
@@ -33,6 +33,7 @@ ThisBuild / javaCppVersion := "1.5.10"
 ThisBuild / resolvers ++= Resolver.sonatypeOssRepos("snapshots")
 
 ThisBuild / githubWorkflowJavaVersions := Seq(JavaSpec.temurin("11"))
+ThisBuild / githubWorkflowOSes := Seq("macos-latest", "ubuntu-latest", "windows-latest")
 
 val enableGPU = settingKey[Boolean]("enable or disable GPU support")
 
@@ -46,10 +47,11 @@ val hasMKL = {
 lazy val commonSettings = Seq(
   Compile / doc / scalacOptions ++= Seq("-groups", "-snippet-compiler:compile"),
   javaCppVersion := (ThisBuild / javaCppVersion).value,
-  javaCppPlatform := Seq()
+  javaCppPlatform := Seq(),
   // This is a hack to avoid depending on the native libs when publishing
   // but conveniently have them on the classpath during development.
   // There's probably a cleaner way to do this.
+  tlJdkRelease := Some(11)
 ) ++ tlReplaceCommandAlias(
   "tlReleaseLocal",
   List(
@@ -111,7 +113,11 @@ lazy val vision = project
 lazy val examples = project
   .in(file("examples"))
   .enablePlugins(NoPublishPlugin)
-  .settings(commonSettings)
+  .settings(
+    commonSettings,
+    // disable discarded non-Unit value warnings in examples for now
+    scalacOptions ~= (_.filterNot(Set("-Wvalue-discard")))
+  )
   .settings(
     fork := true,
     libraryDependencies ++= Seq(
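The examples project opts out of a single lint flag rather than disabling warnings wholesale. In sbt, ~= transforms a setting's current value, so the filterNot keeps every existing scalacOption except those in the given set. The same pattern in isolation (the second flag is a hypothetical extra, shown only to illustrate filtering several options at once):

// Drop specific lint flags from whatever scalacOptions already contains.
scalacOptions ~= { options =>
  options.filterNot(Set("-Wvalue-discard", "-Wnonunit-statement"))
}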
17 changes: 2 additions & 15 deletions core/src/main/scala/torch/DType.scala
@@ -16,22 +16,9 @@
 
 package torch
 
-import org.bytedeco.javacpp.{DoublePointer, FloatPointer}
 import org.bytedeco.pytorch.global.torch.ScalarType
-import org.bytedeco.pytorch.Scalar
-
-import java.nio.{
-  Buffer,
-  ByteBuffer,
-  CharBuffer,
-  DoubleBuffer,
-  FloatBuffer,
-  IntBuffer,
-  LongBuffer,
-  ShortBuffer
-}
-import scala.annotation.{targetName, unused}
-import scala.reflect.ClassTag
 
+import java.nio.{Buffer, ByteBuffer, DoubleBuffer, FloatBuffer, IntBuffer, LongBuffer, ShortBuffer}
 import spire.math.{Complex, UByte}
 
 import scala.compiletime.{erasedValue, summonFrom}
39 changes: 11 additions & 28 deletions core/src/main/scala/torch/Tensor.scala
@@ -25,43 +25,22 @@ import org.bytedeco.javacpp.{
   LongPointer,
   ShortPointer
 }
-import org.bytedeco.javacpp.indexer.{Indexer, IntIndexer, LongIndexer}
 import org.bytedeco.pytorch
-import org.bytedeco.pytorch.{LongOptional, Scalar, TensorIndexArrayRef}
+import org.bytedeco.pytorch.TensorIndexArrayRef
 import org.bytedeco.pytorch.global.torch as torchNative
 import Tensor.*
-import org.bytedeco.pytorch.global.torch.ScalarType
 import org.bytedeco.pytorch.NoGradGuard
 
-import java.nio.{
-  Buffer,
-  ByteBuffer,
-  CharBuffer,
-  DoubleBuffer,
-  FloatBuffer,
-  IntBuffer,
-  LongBuffer,
-  ShortBuffer
-}
-
+import java.nio.{Buffer, ByteBuffer, DoubleBuffer, FloatBuffer, IntBuffer, LongBuffer, ShortBuffer}
 import scala.collection.immutable.ArraySeq
 import scala.reflect.ClassTag
 import scala.annotation.{targetName, unused}
 import org.bytedeco.pytorch.global.torch.DeviceType
 import internal.NativeConverters.{toOptional, toScalar}
 import spire.math.{Complex, UByte}
 
-import scala.reflect.Typeable
 import internal.NativeConverters
 import internal.NativeConverters.toArray
 import Device.CPU
 import Layout.Strided
-import org.bytedeco.pytorch.ByteArrayRef
-import org.bytedeco.pytorch.ShortArrayRef
-import org.bytedeco.pytorch.BoolArrayRef
-import org.bytedeco.pytorch.IntArrayRef
-import org.bytedeco.pytorch.LongArrayRef
-import org.bytedeco.pytorch.FloatArrayRef
-import org.bytedeco.pytorch.DoubleArrayRef
 import org.bytedeco.pytorch.EllipsisIndexType
 import org.bytedeco.pytorch.SymInt
 import org.bytedeco.pytorch.SymIntOptional
@@ -675,7 +654,9 @@ sealed abstract class Tensor[D <: DType]( /* private[torch] */ val native: pyto
     */
   def unsqueeze(dim: Int): Tensor[D] = fromNative(native.unsqueeze(dim))
 
-  def zero(): Unit = native.zero_()
+  def zero_(): this.type =
+    native.zero_()
+    this
 
   private def nativeIndices[T <: Boolean | Long: ClassTag](
       indices: (Slice | Int | Long | Tensor[Bool] | Tensor[UInt8] | Tensor[Int64] | Seq[T] |
@@ -731,7 +712,9 @@ sealed abstract class Tensor[D <: DType]( /* private[torch] */ val native: pyto
 
   def requiresGrad: Boolean = native.requires_grad()
 
-  def requiresGrad_=(requiresGrad: Boolean): Unit = native.requires_grad_(requiresGrad)
+  def requiresGrad_=(requiresGrad: Boolean): this.type =
+    native.requires_grad_(requiresGrad)
+    this
 
   def split(
       splitSize: Int | Seq[Int],
@@ -807,7 +790,8 @@ sealed abstract class Tensor[D <: DType]( /* private[torch] */ val native: pyto
       get: (Array[A], TypedBuffer[A]) => TypedBuffer[A]
   ): Array[A] =
     val a = new Array[A](numel.toInt)
-    if numel > 0 then get(a, tensor.native.contiguous.createBuffer[TypedBuffer[A]])
+    if numel > 0 then
+      val _ = get(a, tensor.native.contiguous.createBuffer[TypedBuffer[A]])
     a
 
   import ScalarType.*
@@ -867,7 +851,6 @@ sealed abstract class Tensor[D <: DType]( /* private[torch] */ val native: pyto
       flattened: Boolean = false,
       includeInfo: Boolean = true
   ): String =
-    if dtype == int32 then max()
     def format(x: Any): String =
       x match
         case x: Float => "%1.4f".format(x)
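Two idioms recur in the Tensor.scala changes above: a mutating operation is renamed with PyTorch's trailing-underscore convention and returns this.type so calls can chain, and an intentionally ignored result is bound to _ so -Wvalue-discard stays quiet. A condensed sketch of both with a toy class (Buf is hypothetical, not the real Tensor API):

class Buf(private var data: Array[Float]):
  // In-place op with a PyTorch-style trailing underscore; returning
  // this.type allows chaining such as buf.zero_().someOtherOp_()
  def zero_(): this.type =
    java.util.Arrays.fill(data, 0f)
    this

  // clone() would be the block's result and get adapted to Unit, which
  // triggers -Wvalue-discard; binding it to _ marks the discard as
  // deliberate.
  def touch(): Unit =
    val _ = data.clone()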
2 changes: 1 addition & 1 deletion core/src/main/scala/torch/hub.scala
@@ -17,7 +17,6 @@
 package torch
 
 import dev.dirs.BaseDirectories
-import scala.io.Source
 import scala.util.Using
 import java.nio.file.Files
 import java.net.URL
@@ -36,5 +35,6 @@ object hub:
     System.err.println(s"Downloading: $url to $cachedFile")
     Using.resource(URL(url).openStream()) { inputStream =>
       Files.copy(inputStream, cachedFile.toNIO)
+      ()
     }
   torch.pickleLoad(cachedFile.toNIO)
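Here the fix is an explicit unit value: Files.copy(InputStream, Path) returns the number of bytes copied as a Long, and since that call was the last expression of the block passed to Using.resource, the compiler would warn about adapting the Long to Unit. Ending the block with () makes the discard explicit. The same idea in isolation (saveTo is a hypothetical helper):

import java.io.InputStream
import java.nio.file.{Files, Path}

def saveTo(in: InputStream, target: Path): Unit =
  // Files.copy returns a Long (bytes copied); ending the method body with
  // () discards it explicitly instead of letting -Wvalue-discard fire.
  Files.copy(in, target)
  ()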
3 changes: 0 additions & 3 deletions core/src/main/scala/torch/internal/NativeConverters.scala
@@ -24,15 +24,12 @@ import org.bytedeco.pytorch.{
   DeviceOptional,
   DoubleOptional,
   BoolOptional,
-  LongArrayRefOptional,
   LongOptional,
   TensorOptional
 }
 
 import scala.reflect.Typeable
 import org.bytedeco.javacpp.{LongPointer, DoublePointer}
-import org.bytedeco.pytorch.GenericDict
-import org.bytedeco.pytorch.GenericDictIterator
 import spire.math.Complex
 import spire.math.UByte
 import scala.annotation.targetName
5 changes: 2 additions & 3 deletions core/src/main/scala/torch/nn/functional/Activations.scala
@@ -21,9 +21,8 @@ package functional
 import Derive.derive
 import org.bytedeco.pytorch
 import org.bytedeco.pytorch.global.torch as torchNative
-import org.bytedeco.javacpp.LongPointer
-import torch.internal.NativeConverters.{fromNative, toNative, toOptional}
-import org.bytedeco.pytorch.{ScalarTypeOptional, TensorOptional}
+import torch.internal.NativeConverters.fromNative
+import org.bytedeco.pytorch.ScalarTypeOptional
 
 private[torch] trait Activations {
 
1 change: 0 additions & 1 deletion core/src/main/scala/torch/nn/functional/Convolution.scala
@@ -19,7 +19,6 @@ package nn
 package functional
 
 import org.bytedeco.pytorch
-import org.bytedeco.pytorch.TensorOptional
 import org.bytedeco.pytorch.global.torch as torchNative
 import torch.internal.NativeConverters.*
 
2 changes: 0 additions & 2 deletions core/src/main/scala/torch/nn/functional/Linear.scala
@@ -18,9 +18,7 @@ package torch
 package nn
 package functional
 
-import org.bytedeco.javacpp.LongPointer
 import org.bytedeco.pytorch
-import org.bytedeco.pytorch.TensorOptional
 import org.bytedeco.pytorch.global.torch as torchNative
 import torch.internal.NativeConverters.{fromNative, toOptional}
 
5 changes: 2 additions & 3 deletions core/src/main/scala/torch/nn/functional/Loss.scala
@@ -18,11 +18,10 @@ package torch
 package nn
 package functional
 
-import org.bytedeco.javacpp.LongPointer
 import org.bytedeco.pytorch
-import org.bytedeco.pytorch.{BCEWithLogitsLossOptions, TensorOptional}
+import org.bytedeco.pytorch.BCEWithLogitsLossOptions
 import org.bytedeco.pytorch.global.torch as torchNative
-import torch.internal.NativeConverters.{fromNative, toOptional}
+import torch.internal.NativeConverters.fromNative
 
 // Loss functions
 private[torch] trait Loss {
4 changes: 1 addition & 3 deletions core/src/main/scala/torch/nn/functional/Pooling.scala
@@ -18,16 +18,14 @@ package torch
 package nn
 package functional
 
-import org.bytedeco.javacpp.LongPointer
 import org.bytedeco.pytorch
 import org.bytedeco.pytorch.{
   AvgPool1dOptions,
   AvgPool2dOptions,
   AvgPool3dOptions,
   MaxPool1dOptions,
   MaxPool2dOptions,
-  MaxPool3dOptions,
-  TensorOptional
+  MaxPool3dOptions
 }
 import org.bytedeco.pytorch.global.torch as torchNative
 import torch.internal.NativeConverters.*
2 changes: 0 additions & 2 deletions core/src/main/scala/torch/nn/functional/package.scala
@@ -17,8 +17,6 @@
 package torch
 package nn
 
-import functional.*
-
 /** @groupname nn_conv Convolution functions
   * @groupname nn_pooling Pooling functions
   * @groupname nn_attention Attention mechanisms
(Diffs for the remaining changed files are not shown.)