* Refactor and improve presets for PyTorch (pull #1360)
HGuillemet authored Jul 25, 2023
1 parent b80d5a2 commit d370dbc
Showing 1,775 changed files with 67,810 additions and 76,066 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
@@ -1,4 +1,5 @@

+* Refactor and improve presets for PyTorch ([pull #1360](https://github.com/bytedeco/javacpp-presets/pull/1360))
* Include `mkl_lapack.h` header file in presets for MKL ([issue #1388](https://github.com/bytedeco/javacpp-presets/issues/1388))
* Map new higher-level C++ API of Triton Inference Server ([pull #1361](https://github.com/bytedeco/javacpp-presets/pull/1361))
* Upgrade presets for OpenCV 4.8.0, DNNL 3.1.1, CPython 3.11.4, NumPy 1.25.1, SciPy 1.11.1, LLVM 16.0.6, TensorFlow Lite 2.13.0, Triton Inference Server 2.34.0, ONNX Runtime 1.15.1, and their dependencies
10 changes: 5 additions & 5 deletions pytorch/README.md
@@ -40,36 +40,36 @@ We can use [Maven 3](http://maven.apache.org/) to download and install automatic
<modelVersion>4.0.0</modelVersion>
<groupId>org.bytedeco.pytorch</groupId>
<artifactId>simplemnist</artifactId>
-<version>1.5.9</version>
+<version>1.5.10-SNAPSHOT</version>
<properties>
<exec.mainClass>SimpleMNIST</exec.mainClass>
</properties>
<dependencies>
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>pytorch-platform</artifactId>
-<version>2.0.1-1.5.9</version>
+<version>2.0.1-1.5.10-SNAPSHOT</version>
</dependency>

<!-- Additional dependencies required to use CUDA, cuDNN, and NCCL -->
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>pytorch-platform-gpu</artifactId>
-<version>2.0.1-1.5.9</version>
+<version>2.0.1-1.5.10-SNAPSHOT</version>
</dependency>

<!-- Additional dependencies to use bundled CUDA, cuDNN, and NCCL -->
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>cuda-platform-redist</artifactId>
-<version>12.1-8.9-1.5.9</version>
+<version>12.1-8.9-1.5.10-SNAPSHOT</version>
</dependency>

<!-- Additional dependencies to use bundled full version of MKL -->
<dependency>
<groupId>org.bytedeco</groupId>
<artifactId>mkl-platform-redist</artifactId>
-<version>2023.1-1.5.9</version>
+<version>2023.1-1.5.10-SNAPSHOT</version>
</dependency>
</dependencies>
<build>
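With the pom.xml in place, a quick way to verify that the artifacts resolve and the native libraries load is a minimal smoke test before moving on to the full SimpleMNIST sample. This is a hedged sketch rather than part of the sample: the class name SmokeTest is illustrative, and it relies only on JavaCPP's Loader and the torch global class from these presets.

import org.bytedeco.javacpp.Loader;
import org.bytedeco.pytorch.global.torch;

public class SmokeTest {
    public static void main(String[] args) {
        // Extracts and loads libtorch and its dependencies from the
        // artifacts above; throws UnsatisfiedLinkError if they are missing.
        Loader.load(torch.class);
        System.out.println("PyTorch native libraries loaded");
    }
}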
4 changes: 3 additions & 1 deletion pytorch/src/gen/java/org/bytedeco/pytorch/ASMoutput.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
26 changes: 26 additions & 0 deletions pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTraceWrapper.java
@@ -0,0 +1,26 @@
// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;

import static org.bytedeco.pytorch.global.torch.*;

@Namespace("torch::profiler::impl::kineto") @Opaque @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ActivityTraceWrapper extends Pointer {
/** Empty constructor. Calls {@code super((Pointer)null)}. */
public ActivityTraceWrapper() { super((Pointer)null); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ActivityTraceWrapper(Pointer p) { super(p); }
}
46 changes: 46 additions & 0 deletions pytorch/src/gen/java/org/bytedeco/pytorch/ActivityTypeSet.java
@@ -0,0 +1,46 @@
// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;

import static org.bytedeco.javacpp.presets.javacpp.*;
import static org.bytedeco.openblas.global.openblas_nolapack.*;
import static org.bytedeco.openblas.global.openblas.*;

import static org.bytedeco.pytorch.global.torch.*;

@Name("std::set<torch::profiler::impl::ActivityType>") @Properties(inherit = org.bytedeco.pytorch.presets.torch.class)
public class ActivityTypeSet extends Pointer {
static { Loader.load(); }
/** Pointer cast constructor. Invokes {@link Pointer#Pointer(Pointer)}. */
public ActivityTypeSet(Pointer p) { super(p); }
public ActivityTypeSet() { allocate(); }
private native void allocate();
public native @Name("operator =") @ByRef ActivityTypeSet put(@ByRef ActivityTypeSet x);

public boolean empty() { return size() == 0; }
public native long size();

public ActivityType front() { try (Iterator it = begin()) { return it.get(); } }
public native void insert(@ByRef ActivityType value);
public native void erase(@ByRef ActivityType value);
public native @ByVal Iterator begin();
public native @ByVal Iterator end();
@NoOffset @Name("iterator") public static class Iterator extends Pointer {
public Iterator(Pointer p) { super(p); }
public Iterator() { }

public native @Name("operator ++") @ByRef Iterator increment();
public native @Name("operator ==") boolean equals(@ByRef Iterator it);
public native @Name("operator *") @ByRef @Const ActivityType get();
}
}
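
As a usage note, this class follows JavaCPP's standard mapping of std::set, so traversal goes through the C++-style begin()/end() iterator pair above rather than java.util.Iterator. A minimal sketch, assuming a set already populated by the profiler (the activities variable is illustrative):

ActivityTypeSet activities = new ActivityTypeSet();
// ... filled elsewhere, e.g. via activities.insert(...) ...
for (ActivityTypeSet.Iterator it = activities.begin();
     !it.equals(activities.end()); it.increment()) {
    ActivityType type = it.get(); // dereferences operator*
    // inspect type here
}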

12 changes: 7 additions & 5 deletions pytorch/src/gen/java/org/bytedeco/pytorch/Adagrad.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
@@ -33,10 +35,10 @@ public Adagrad(
private native void allocate(
@ByVal OptimizerParamGroupVector param_groups);

-public Adagrad(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults) { super((Pointer)null); allocate(params, defaults); }
-private native void allocate(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults);
-public Adagrad(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); }
-private native void allocate(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector params);
+public Adagrad(@Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults) { super((Pointer)null); allocate(params, defaults); }
+private native void allocate(@Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdagradOptions{}") AdagradOptions defaults);
+public Adagrad(@Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); }
+private native void allocate(@Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector params);

public native @ByVal Tensor step(@ByVal(nullValue = "torch::optim::Optimizer::LossClosure(nullptr)") LossClosure closure);
public native @ByVal Tensor step();
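The net effect of this hunk is that the parameter list is now cast to std::vector<torch::Tensor> instead of std::vector<at::Tensor>; Java call sites are unchanged. A minimal construction sketch, assuming weight and bias are existing Tensor instances with gradients enabled (both names are illustrative, and the varargs TensorVector constructor is part of JavaCPP's stock std::vector mapping):

TensorVector params = new TensorVector(weight, bias);
Adagrad optimizer = new Adagrad(params); // uses AdagradOptions{} defaults
Tensor result = optimizer.step();        // one update from accumulated gradients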
9 changes: 7 additions & 2 deletions pytorch/src/gen/java/org/bytedeco/pytorch/AdagradOptions.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
@@ -33,7 +35,10 @@ public class AdagradOptions extends OptimizerCloneableAdagradOptions {
public native @ByRef @NoException(true) DoublePointer eps();



+private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(
+@Const @ByRef AdagradOptions lhs,
+@Const @ByRef AdagradOptions rhs);
+public boolean equals(AdagradOptions rhs) { return equals(this, rhs); }
public native double get_lr();
public native void set_lr(double lr);
}
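
The equals(AdagradOptions) wrapper added above bridges the C++ operator== that compares option objects by value. A hedged sketch of what this enables, assuming AdagradOptions is default-constructible on the Java side; the same pattern recurs in the Adam and AdamW options and param-state classes below:

AdagradOptions a = new AdagradOptions();
AdagradOptions b = new AdagradOptions();
a.set_lr(0.01);
b.set_lr(0.01);
boolean sameConfig = a.equals(b); // true when all fields (lr, eps, ...) match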
pytorch/src/gen/java/org/bytedeco/pytorch/AdagradParamState.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
@@ -38,5 +40,8 @@ public class AdagradParamState extends OptimizerCloneableAdagradParamState {
public native @Cast("int64_t*") @ByRef @NoException(true) LongPointer step();



+private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(
+@Const @ByRef AdagradParamState lhs,
+@Const @ByRef AdagradParamState rhs);
+public boolean equals(AdagradParamState rhs) { return equals(this, rhs); }
}
12 changes: 7 additions & 5 deletions pytorch/src/gen/java/org/bytedeco/pytorch/Adam.java
Original file line number Diff line number Diff line change
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
@@ -32,10 +34,10 @@ public Adam(
@ByVal OptimizerParamGroupVector param_groups) { super((Pointer)null); allocate(param_groups); }
private native void allocate(
@ByVal OptimizerParamGroupVector param_groups);
-public Adam(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamOptions{}") AdamOptions defaults) { super((Pointer)null); allocate(params, defaults); }
-private native void allocate(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamOptions{}") AdamOptions defaults);
-public Adam(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); }
-private native void allocate(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector params);
+public Adam(@Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamOptions{}") AdamOptions defaults) { super((Pointer)null); allocate(params, defaults); }
+private native void allocate(@Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamOptions{}") AdamOptions defaults);
+public Adam(@Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); }
+private native void allocate(@Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector params);

public native @ByVal Tensor step(@ByVal(nullValue = "torch::optim::Optimizer::LossClosure(nullptr)") LossClosure closure);
public native @ByVal Tensor step();
9 changes: 7 additions & 2 deletions pytorch/src/gen/java/org/bytedeco/pytorch/AdamOptions.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
@@ -33,7 +35,10 @@ public class AdamOptions extends OptimizerCloneableAdamOptions {
public native @Cast("bool*") @ByRef @NoException(true) BoolPointer amsgrad();



+private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(
+@Const @ByRef AdamOptions lhs,
+@Const @ByRef AdamOptions rhs);
+public boolean equals(AdamOptions rhs) { return equals(this, rhs); }
public native double get_lr();
public native void set_lr(double lr);
}
9 changes: 7 additions & 2 deletions pytorch/src/gen/java/org/bytedeco/pytorch/AdamParamState.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
@@ -40,5 +42,8 @@ public class AdamParamState extends OptimizerCloneableAdamParamState {
public native @ByRef @NoException(true) Tensor max_exp_avg_sq();



+private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(
+@Const @ByRef AdamParamState lhs,
+@Const @ByRef AdamParamState rhs);
+public boolean equals(AdamParamState rhs) { return equals(this, rhs); }
}
12 changes: 7 additions & 5 deletions pytorch/src/gen/java/org/bytedeco/pytorch/AdamW.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
@@ -32,10 +34,10 @@ public AdamW(
@ByVal OptimizerParamGroupVector param_groups) { super((Pointer)null); allocate(param_groups); }
private native void allocate(
@ByVal OptimizerParamGroupVector param_groups);
-public AdamW(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamWOptions{}") AdamWOptions defaults) { super((Pointer)null); allocate(params, defaults); }
-private native void allocate(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamWOptions{}") AdamWOptions defaults);
-public AdamW(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); }
-private native void allocate(@Cast({"", "std::vector<at::Tensor>"}) @StdMove TensorVector params);
+public AdamW(@Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamWOptions{}") AdamWOptions defaults) { super((Pointer)null); allocate(params, defaults); }
+private native void allocate(@Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector params, @ByVal(nullValue = "torch::optim::AdamWOptions{}") AdamWOptions defaults);
+public AdamW(@Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector params) { super((Pointer)null); allocate(params); }
+private native void allocate(@Cast({"", "std::vector<torch::Tensor>"}) @StdMove TensorVector params);

public native @ByVal Tensor step(@ByVal(nullValue = "torch::optim::Optimizer::LossClosure(nullptr)") LossClosure closure);
public native @ByVal Tensor step();
9 changes: 7 additions & 2 deletions pytorch/src/gen/java/org/bytedeco/pytorch/AdamWOptions.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
@@ -33,7 +35,10 @@ public class AdamWOptions extends OptimizerCloneableAdamWOptions {
public native @Cast("bool*") @ByRef @NoException(true) BoolPointer amsgrad();



+private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(
+@Const @ByRef AdamWOptions lhs,
+@Const @ByRef AdamWOptions rhs);
+public boolean equals(AdamWOptions rhs) { return equals(this, rhs); }
public native double get_lr();
public native void set_lr(double lr);
}
pytorch/src/gen/java/org/bytedeco/pytorch/AdamWParamState.java
@@ -1,10 +1,12 @@
-// Targeted by JavaCPP version 1.5.9: DO NOT EDIT THIS FILE
+// Targeted by JavaCPP version 1.5.10-SNAPSHOT: DO NOT EDIT THIS FILE

package org.bytedeco.pytorch;

import org.bytedeco.pytorch.Allocator;
import org.bytedeco.pytorch.Function;
import org.bytedeco.pytorch.functions.*;
import org.bytedeco.pytorch.Module;
import org.bytedeco.javacpp.annotation.Cast;
import java.nio.*;
import org.bytedeco.javacpp.*;
import org.bytedeco.javacpp.annotation.*;
@@ -40,5 +42,8 @@ public class AdamWParamState extends OptimizerCloneableAdamWParamState {
public native @ByRef @NoException(true) Tensor max_exp_avg_sq();



+private static native @Namespace @Cast("bool") @Name("operator ==") boolean equals(
+@Const @ByRef AdamWParamState lhs,
+@Const @ByRef AdamWParamState rhs);
+public boolean equals(AdamWParamState rhs) { return equals(this, rhs); }
}