From 92596919c7b5535ce2172037371de6db4fdad390 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn=20Norell?=
Date: Fri, 21 Aug 2020 13:03:45 +0200
Subject: [PATCH 1/2] Fixed some inconsistencies

---
 build/torch_api.cpp | 4 ++--
 build/torch_api.h   | 8 ++++----
 2 files changed, 6 insertions(+), 6 deletions(-)

diff --git a/build/torch_api.cpp b/build/torch_api.cpp
index 0890fa17..7ef53a8e 100644
--- a/build/torch_api.cpp
+++ b/build/torch_api.cpp
@@ -262,11 +262,11 @@ return 1;
 }
 
 int at_set_double_value_at_indexes(tensor t, int *indexes, int indexes_len, double v) {
-  at_set_value_at_indexes(t, indexes, indexes_len, v);
+  return at_set_value_at_indexes(t, indexes, indexes_len, v);
 }
 
 int at_set_int64_value_at_indexes(tensor t, int *indexes, int indexes_len, int64_t v) {
-  at_set_value_at_indexes(t, indexes, indexes_len, v);
+  return at_set_value_at_indexes(t, indexes, indexes_len, v);
 }
 
 int at_fill_double(tensor t, double v) {
diff --git a/build/torch_api.h b/build/torch_api.h
index 6b74e07a..2f74f9ec 100644
--- a/build/torch_api.h
+++ b/build/torch_api.h
@@ -39,8 +39,8 @@ int at_sync();
 int at_from_blob(tensor *, void *data, int64_t *dims, int ndims, int64_t *strides, int nstrides, int dev);
 int at_tensor_of_data(tensor *, void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type);
 int at_copy_data(tensor tensor, void *vs, int64_t numel, int element_size_in_bytes);
-int at_float_vec(double *values, int value_len, int type);
-int at_int_vec(int64_t *values, int value_len, int type);
+int at_float_vec(tensor* tensor, double *values, int value_len, int type);
+int at_int_vec(tensor* tensor, int64_t *values, int value_len, int type);
 
 int at_defined(int *i, tensor);
 int at_dim(int *i, tensor);
@@ -56,7 +56,7 @@ int at_fill_double(tensor, double);
 int at_fill_int64(tensor, int64_t);
 
 int at_double_value_at_indexes(double *i, tensor, int *indexes, int indexes_len);
-int at_int64_value_at_indexes(double *i, tensor, int *indexes, int indexes_len);
+int at_int64_value_at_indexes(int64_t *i, tensor, int *indexes, int indexes_len);
 int at_set_double_value_at_indexes(tensor, int *indexes, int indexes_len, double v);
 int at_set_int64_value_at_indexes(tensor, int *indexes, int indexes_len, int64_t v);
 
@@ -65,7 +65,7 @@ int at_copy_(tensor dst, tensor src);
 int at_print(tensor);
 // char *at_to_string(tensor, int line_size);
 int at_save(tensor, char *filename);
-tensor at_load(char *filename);
+int at_load(char *filename, tensor *tensor);
 
 int at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename);
 /* [at_load_multi] takes as input an array of nullptr for [tensors].
*/ From d4dc4b1dd4b6048c1593a3bc078062b8b783a40e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Norell?= Date: Fri, 21 Aug 2020 14:20:24 +0200 Subject: [PATCH 2/2] Added dll export name decorator --- build/torch_api.cpp | 158 +-- build/torch_api.h | 184 +-- build/torch_api_generated.cpp.h | 1930 +++++++++++++++---------------- build/torch_api_generated.h | 1930 +++++++++++++++---------------- 4 files changed, 2104 insertions(+), 2098 deletions(-) diff --git a/build/torch_api.cpp b/build/torch_api.cpp index 7ef53a8e..9211c180 100644 --- a/build/torch_api.cpp +++ b/build/torch_api.cpp @@ -11,14 +11,14 @@ #define caml_invalid_argument printf using namespace std; -int get_last_error(char *err) { +C_API int get_last_error(char *err) { int len = strlen(myerr); for (int i = 0; i < len; ++i) err[i] = myerr[i]; err[len] = '\0'; return 0; } -int flush_error() { +C_API int flush_error() { PROTECT( myerr = ""; return 0; @@ -26,7 +26,7 @@ int flush_error() { return 1; } -int at_manual_seed(int64_t seed) { +C_API int at_manual_seed(int64_t seed) { PROTECT( torch::manual_seed(seed); return 0; @@ -34,13 +34,13 @@ int at_manual_seed(int64_t seed) { return 1; } -vector of_carray_tensor(torch::Tensor **vs, int len) { +C_API vector of_carray_tensor(torch::Tensor **vs, int len) { vector result; for (int i = 0; i < len; ++i) result.push_back(*(vs[i])); return result; } -int at_from_blob(tensor *out__, void *data, int64_t *dims, int ndims, int64_t *strides, int nstrides, int dev) { +C_API int at_from_blob(tensor *out__, void *data, int64_t *dims, int ndims, int64_t *strides, int nstrides, int dev) { PROTECT( auto options = torch::TensorOptions().device(torch::kCUDA, dev).requires_grad(false); torch::Tensor tens = torch::from_blob(data, torch::IntArrayRef(dims, ndims), torch::IntArrayRef(strides, nstrides), options); @@ -51,7 +51,7 @@ int at_from_blob(tensor *out__, void *data, int64_t *dims, int ndims, int64_t *s return 1; } -int at_new_tensor(tensor *out__) { +C_API int at_new_tensor(tensor *out__) { PROTECT( out__[0] = new torch::Tensor(); return 0; @@ -60,7 +60,7 @@ int at_new_tensor(tensor *out__) { return 1; } -int at_empty_cache() { +C_API int at_empty_cache() { PROTECT( c10::cuda::CUDACachingAllocator::emptyCache(); return 0; @@ -68,7 +68,7 @@ int at_empty_cache() { return 1; } -int at_no_grad(int flag) { +C_API int at_no_grad(int flag) { PROTECT( torch::GradMode::set_enabled((bool)flag); return 0; @@ -77,7 +77,7 @@ int at_no_grad(int flag) { return 1; } -int at_sync() { +C_API int at_sync() { PROTECT( at::cuda::CUDAStream stream = at::cuda::getCurrentCUDAStream(); C10_CUDA_CHECK(cudaStreamSynchronize(stream)); @@ -87,7 +87,7 @@ int at_sync() { return 1; } -int at_tensor_of_data(tensor *out__, void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type) { +C_API int at_tensor_of_data(tensor *out__, void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type) { PROTECT( // auto options = torch::TensorOptions().dtype(torch::ScalarType(type)).requires_grad(false); torch::Tensor tensor = torch::zeros(torch::IntArrayRef(dims, ndims), torch::ScalarType(type)); @@ -103,7 +103,7 @@ int at_tensor_of_data(tensor *out__, void *vs, int64_t *dims, int ndims, int ele return 1; } -int at_copy_data(tensor tensor, void *vs, int64_t numel, int elt_size_in_bytes) { +C_API int at_copy_data(tensor tensor, void *vs, int64_t numel, int elt_size_in_bytes) { PROTECT( if (elt_size_in_bytes != tensor->element_size()) { myerr = strdup("incoherent element sizes in bytes"); @@ -127,7 +127,7 @@ int 
at_copy_data(tensor tensor, void *vs, int64_t numel, int elt_size_in_bytes) return 1; } -int at_float_vec(tensor *out__, double *vs, int len, int type) { +C_API int at_float_vec(tensor *out__, double *vs, int len, int type) { PROTECT( torch::Tensor tensor = torch::empty({len}, torch::ScalarType(type)); for (int i = 0; i < len; ++i) tensor[i] = vs[i]; @@ -138,7 +138,7 @@ int at_float_vec(tensor *out__, double *vs, int len, int type) { return 1; } -int at_int_vec(tensor *out__, int64_t *vs, int len, int type) { +C_API int at_int_vec(tensor *out__, int64_t *vs, int len, int type) { PROTECT( torch::Tensor tensor = torch::empty({len}, torch::ScalarType(type)); for (int i = 0; i < len; ++i) tensor[i] = vs[i]; @@ -149,7 +149,7 @@ int at_int_vec(tensor *out__, int64_t *vs, int len, int type) { return 1; } -int at_defined(int *i, tensor t) { +C_API int at_defined(int *i, tensor t) { PROTECT( i[0] = t->defined(); return 0; @@ -158,7 +158,7 @@ int at_defined(int *i, tensor t) { return 1; } -int at_dim(int *i, tensor t) { +C_API int at_dim(int *i, tensor t) { PROTECT( i[0] = t->dim(); return 0; @@ -167,7 +167,7 @@ int at_dim(int *i, tensor t) { return 1; } -int at_shape(tensor t, int *dims) { +C_API int at_shape(tensor t, int *dims) { PROTECT( int i = 0; for (int dim : t->sizes()) dims[i++] = dim; @@ -176,7 +176,7 @@ int at_shape(tensor t, int *dims) { return 1; } -int at_scalar_type(int *i, tensor t) { +C_API int at_scalar_type(int *i, tensor t) { PROTECT( i[0] = static_cast(t->scalar_type()); return 0; @@ -184,7 +184,7 @@ int at_scalar_type(int *i, tensor t) { return 1; } -int at_backward(tensor t, int keep_graph, int create_graph) { +C_API int at_backward(tensor t, int keep_graph, int create_graph) { PROTECT( t->backward({}, keep_graph, create_graph); return 0; @@ -192,7 +192,7 @@ int at_backward(tensor t, int keep_graph, int create_graph) { return 1; } -int at_requires_grad(int *i, tensor t) { +C_API int at_requires_grad(int *i, tensor t) { PROTECT( i[0] = t->requires_grad(); return 0; @@ -201,7 +201,7 @@ int at_requires_grad(int *i, tensor t) { return 1; } -int at_grad_set_enabled(int b) { +C_API int at_grad_set_enabled(int b) { PROTECT( bool is_enabled = torch::autograd::GradMode::is_enabled(); torch::autograd::GradMode::set_enabled(b); @@ -211,7 +211,7 @@ int at_grad_set_enabled(int b) { return 1; } -int at_get(tensor *out__, tensor t, int index) { +C_API int at_get(tensor *out__, tensor t, int index) { PROTECT( out__[0] = new torch::Tensor((*t)[index]); return 0; @@ -232,7 +232,7 @@ T at_value_at_indexes(tensor t, int *indexes, int indexes_len) { return T(); } -int at_double_value_at_indexes(double *i, tensor t, int *indexes, int indexes_len) { +C_API int at_double_value_at_indexes(double *i, tensor t, int *indexes, int indexes_len) { PROTECT( i[0] = at_value_at_indexes(t, indexes, indexes_len); return 0; @@ -240,7 +240,7 @@ int at_double_value_at_indexes(double *i, tensor t, int *indexes, int indexes_le return 1; } -int at_int64_value_at_indexes(int64_t *i, tensor t, int *indexes, int indexes_len) { +C_API int at_int64_value_at_indexes(int64_t *i, tensor t, int *indexes, int indexes_len) { PROTECT( i[0] = at_value_at_indexes(t, indexes, indexes_len); return 0; @@ -261,15 +261,15 @@ int at_set_value_at_indexes(tensor t, int *indexes, int indexes_len, T v) { return 1; } -int at_set_double_value_at_indexes(tensor t, int *indexes, int indexes_len, double v) { +C_API int at_set_double_value_at_indexes(tensor t, int *indexes, int indexes_len, double v) { return at_set_value_at_indexes(t, indexes, 
indexes_len, v); } -int at_set_int64_value_at_indexes(tensor t, int *indexes, int indexes_len, int64_t v) { +C_API int at_set_int64_value_at_indexes(tensor t, int *indexes, int indexes_len, int64_t v) { return at_set_value_at_indexes(t, indexes, indexes_len, v); } -int at_fill_double(tensor t, double v) { +C_API int at_fill_double(tensor t, double v) { PROTECT( t->fill_(v); return 0; @@ -277,7 +277,7 @@ int at_fill_double(tensor t, double v) { return 1; } -int at_fill_int64(tensor t, int64_t v) { +C_API int at_fill_int64(tensor t, int64_t v) { PROTECT( t->fill_(v); return 0; @@ -285,7 +285,7 @@ int at_fill_int64(tensor t, int64_t v) { return 1; } -int at_print(tensor t) { +C_API int at_print(tensor t) { PROTECT( torch::Tensor *tensor = (torch::Tensor*)t; cout << *tensor << endl; @@ -303,7 +303,7 @@ return 1; // return nullptr; // } -int at_copy_(tensor dst, tensor src) { +C_API int at_copy_(tensor dst, tensor src) { PROTECT( dst->copy_(*src); return 0; @@ -311,7 +311,7 @@ int at_copy_(tensor dst, tensor src) { return 1; } -int at_save(tensor t, char *filename) { +C_API int at_save(tensor t, char *filename) { PROTECT( torch::save(*t, filename); return 0; @@ -319,7 +319,7 @@ int at_save(tensor t, char *filename) { return 1; } -int at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename) { +C_API int at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename) { PROTECT( torch::serialize::OutputArchive archive; for (int i = 0; i < ntensors; ++i) @@ -330,7 +330,7 @@ int at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *file return 1; } -int at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename) { +C_API int at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename) { PROTECT( torch::serialize::InputArchive archive; archive.load_from(std::string(filename)); @@ -346,7 +346,7 @@ int at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *file return 1; } -int at_load_callback(char *filename, void (*f)(char *, tensor)) { +C_API int at_load_callback(char *filename, void (*f)(char *, tensor)) { PROTECT( auto module = torch::jit::load(filename); for (const auto &p : module.named_parameters()) { @@ -358,7 +358,7 @@ int at_load_callback(char *filename, void (*f)(char *, tensor)) { return 1; } -int at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *filename) { +C_API int at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *filename) { PROTECT( torch::NoGradGuard no_grad; torch::serialize::InputArchive archive; @@ -377,7 +377,7 @@ int at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *fil return 1; } -int at_load(char *filename, tensor *out__) { +C_API int at_load(char *filename, tensor *out__) { PROTECT( torch::Tensor tensor; torch::load(tensor, filename); @@ -388,7 +388,7 @@ int at_load(char *filename, tensor *out__) { return 1; } -int at_free(tensor t) { +C_API int at_free(tensor t) { PROTECT( delete(t); return 0; @@ -396,13 +396,13 @@ int at_free(tensor t) { return 1; } -int at_run_backward(tensor *tensors, - int ntensors, - tensor *inputs, - int ninputs, - tensor *outputs, - int keep_graph, - int create_graph) { +C_API int at_run_backward(tensor *tensors, + int ntensors, + tensor *inputs, + int ninputs, + tensor *outputs, + int keep_graph, + int create_graph) { PROTECT( torch::autograd::Engine engine; vector roots; @@ -429,7 +429,7 @@ int at_run_backward(tensor *tensors, return 1; } -int 
ato_adam(optimizer *out__, double learning_rate, +C_API int ato_adam(optimizer *out__, double learning_rate, double beta1, double beta2, double weight_decay) { @@ -446,7 +446,7 @@ int ato_adam(optimizer *out__, double learning_rate, return 1; } -int ato_rmsprop(optimizer *out__, double learning_rate, +C_API int ato_rmsprop(optimizer *out__, double learning_rate, double alpha, double eps, double weight_decay, @@ -467,7 +467,7 @@ int ato_rmsprop(optimizer *out__, double learning_rate, return 1; } -int ato_sgd(optimizer *out__, double learning_rate, +C_API int ato_sgd(optimizer *out__, double learning_rate, double momentum, double dampening, double weight_decay, @@ -486,7 +486,7 @@ int ato_sgd(optimizer *out__, double learning_rate, return 1; } -int ato_add_parameters(optimizer t, tensor *tensors, int ntensors) { +C_API int ato_add_parameters(optimizer t, tensor *tensors, int ntensors) { PROTECT( t->add_parameters(of_carray_tensor(tensors, ntensors)); return 0; @@ -494,7 +494,7 @@ int ato_add_parameters(optimizer t, tensor *tensors, int ntensors) { return 1; } -int ato_set_learning_rate(optimizer t, double learning_rate) { +C_API int ato_set_learning_rate(optimizer t, double learning_rate) { PROTECT( if (auto adam = dynamic_cast(t)) adam->options.learning_rate(learning_rate); @@ -509,7 +509,7 @@ int ato_set_learning_rate(optimizer t, double learning_rate) { return 1; } -int ato_set_momentum(optimizer t, double momentum) { +C_API int ato_set_momentum(optimizer t, double momentum) { PROTECT( if (auto adam = dynamic_cast(t)) adam->options.beta1(momentum); @@ -524,7 +524,7 @@ int ato_set_momentum(optimizer t, double momentum) { return 1; } -int ato_zero_grad(optimizer t) { +C_API int ato_zero_grad(optimizer t) { PROTECT( t->zero_grad(); return 0; @@ -532,7 +532,7 @@ int ato_zero_grad(optimizer t) { return 1; } -int ato_step(optimizer t) { +C_API int ato_step(optimizer t) { PROTECT( t->step(); return 0; @@ -540,7 +540,7 @@ int ato_step(optimizer t) { return 1; } -int ato_free(optimizer t) { +C_API int ato_free(optimizer t) { PROTECT( delete(t); return 0; @@ -548,7 +548,7 @@ int ato_free(optimizer t) { return 1; } -int ats_int(scalar *out__, int64_t v) { +C_API int ats_int(scalar *out__, int64_t v) { PROTECT( out__[0] = new torch::Scalar(v); return 0; @@ -557,7 +557,7 @@ int ats_int(scalar *out__, int64_t v) { return 1; } -int ats_float(scalar *out__, double v) { +C_API int ats_float(scalar *out__, double v) { PROTECT( out__[0] = new torch::Scalar(v); return 0; @@ -566,7 +566,7 @@ int ats_float(scalar *out__, double v) { return 1; } -int ats_free(scalar s) { +C_API int ats_free(scalar s) { PROTECT( delete(s); return 0; @@ -574,7 +574,7 @@ int ats_free(scalar s) { return 1; } -int atc_cuda_device_count(int *i) { +C_API int atc_cuda_device_count(int *i) { PROTECT( i[0] = torch::cuda::device_count(); return 0; @@ -583,7 +583,7 @@ int atc_cuda_device_count(int *i) { return 1; } -int atc_cuda_is_available(int *i) { +C_API int atc_cuda_is_available(int *i) { PROTECT( i[0] = torch::cuda::is_available(); return 0; @@ -592,7 +592,7 @@ int atc_cuda_is_available(int *i) { return 1; } -int atc_cudnn_is_available(int *i) { +C_API int atc_cudnn_is_available(int *i) { PROTECT( i[0] = torch::cuda::cudnn_is_available(); return 0; @@ -601,7 +601,7 @@ int atc_cudnn_is_available(int *i) { return 1; } -int atc_set_benchmark_cudnn(int b) { +C_API int atc_set_benchmark_cudnn(int b) { PROTECT( at::globalContext().setBenchmarkCuDNN(b); return 0; @@ -609,7 +609,7 @@ int atc_set_benchmark_cudnn(int b) { return 1; } -int 
atm_load(char *filename, module *out__) { +C_API int atm_load(char *filename, module *out__) { PROTECT( out__[0] = new torch::jit::script::Module(torch::jit::load(filename)); return 0; @@ -618,7 +618,7 @@ int atm_load(char *filename, module *out__) { return 1; } -int atm_forward(tensor *out__, module m, tensor *tensors, int ntensors) { +C_API int atm_forward(tensor *out__, module m, tensor *tensors, int ntensors) { PROTECT( std::vector inputs; for (int i = 0; i < ntensors; ++i) @@ -635,9 +635,9 @@ int atm_forward(tensor *out__, module m, tensor *tensors, int ntensors) { return 1; } -int atm_forward_(ivalue *out__, module m, - ivalue *ivalues, - int nivalues) { +C_API int atm_forward_(ivalue *out__, module m, + ivalue *ivalues, + int nivalues) { PROTECT( std::vector inputs; for (int i = 0; i < nivalues; ++i) @@ -650,7 +650,7 @@ int atm_forward_(ivalue *out__, module m, return 1; } -int atm_free(module m) { +C_API int atm_free(module m) { PROTECT( delete(m); return 0; @@ -658,7 +658,7 @@ int atm_free(module m) { return 1; } -int ati_tensor(ivalue *out__, tensor t) { +C_API int ati_tensor(ivalue *out__, tensor t) { PROTECT( out__[0] = new torch::jit::IValue(*t); return 0; @@ -667,7 +667,7 @@ int ati_tensor(ivalue *out__, tensor t) { return 1; } -int ati_int(ivalue *out__, int64_t i) { +C_API int ati_int(ivalue *out__, int64_t i) { PROTECT( out__[0] = new torch::jit::IValue(i); return 0; @@ -676,7 +676,7 @@ int ati_int(ivalue *out__, int64_t i) { return 1; } -int ati_double(ivalue *out__, double d) { +C_API int ati_double(ivalue *out__, double d) { PROTECT( out__[0] = new torch::jit::IValue(d); return 0; @@ -685,7 +685,7 @@ int ati_double(ivalue *out__, double d) { return 1; } -int ati_tuple(ivalue *out__, ivalue *is, int nvalues) { +C_API int ati_tuple(ivalue *out__, ivalue *is, int nvalues) { PROTECT( vector vec; for (int i = 0; i < nvalues; ++i) vec.push_back(*(is[i])); @@ -696,7 +696,7 @@ int ati_tuple(ivalue *out__, ivalue *is, int nvalues) { return 1; } -int ati_tag(int *out__, ivalue i) { +C_API int ati_tag(int *out__, ivalue i) { PROTECT( if (i->isTensor()) out__[0] = 0; else if (i->isInt()) out__[0] = 1; @@ -709,7 +709,7 @@ int ati_tag(int *out__, ivalue i) { return 1; } -int ati_to_int(int64_t *out__, ivalue i) { +C_API int ati_to_int(int64_t *out__, ivalue i) { PROTECT( out__[0] = i->toInt(); return 0; @@ -717,7 +717,7 @@ int ati_to_int(int64_t *out__, ivalue i) { return 1; } -int ati_to_double(double *out__, ivalue i) { +C_API int ati_to_double(double *out__, ivalue i) { PROTECT( out__[0] = i->toDouble(); return 0; @@ -725,7 +725,7 @@ int ati_to_double(double *out__, ivalue i) { return 1; } -int ati_to_tensor(tensor *out__, ivalue i) { +C_API int ati_to_tensor(tensor *out__, ivalue i) { PROTECT( out__[0] = new torch::Tensor(i->toTensor()); return 0; @@ -735,7 +735,7 @@ return 1; } -int ati_tuple_length(int *out__, ivalue i) { +C_API int ati_tuple_length(int *out__, ivalue i) { PROTECT( out__[0] = i->toTuple()->elements().size(); return 0; @@ -743,9 +743,9 @@ int ati_tuple_length(int *out__, ivalue i) { return 1; } -int ati_to_tuple(ivalue i, - ivalue *outputs, - int noutputs) { +C_API int ati_to_tuple(ivalue i, + ivalue *outputs, + int noutputs) { PROTECT( auto vec = i->toTuple()->elements(); if (vec.size() != noutputs) { @@ -760,7 +760,7 @@ return 1; } -int ati_free(ivalue i) { +C_API int ati_free(ivalue i) { PROTECT( delete(i); return 0; @@ -768,7 +768,7 @@ int ati_free(ivalue i) { return 1; } -at::Device device_of_int(int d) { +C_API at::Device device_of_int(int d) { if (d < 0) 
return at::Device(at::kCPU); return at::Device(at::kCUDA, /*index=*/d); } diff --git a/build/torch_api.h b/build/torch_api.h index 2f74f9ec..5144ae92 100644 --- a/build/torch_api.h +++ b/build/torch_api.h @@ -2,6 +2,12 @@ #define __TORCH_API_H__ #include +#ifdef WIN32 +#define C_API extern "C" __declspec(dllexport) +#else +#define C_API extern "C" +#endif + #ifdef __cplusplus extern "C" { typedef torch::Tensor *tensor; @@ -28,115 +34,115 @@ typedef void *ivalue; typedef void *ngg; #endif -int get_last_error(char *); -int flush_error(); - -int at_manual_seed(int64_t); -int at_new_tensor(tensor *); -int at_empty_cache(); -int at_no_grad(int flag); -int at_sync(); -int at_from_blob(tensor *, void *data, int64_t *dims, int ndims, int64_t *strides, int nstrides, int dev); -int at_tensor_of_data(tensor *, void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type); -int at_copy_data(tensor tensor, void *vs, int64_t numel, int element_size_in_bytes); -int at_float_vec(tensor* tensor, double *values, int value_len, int type); -int at_int_vec(tensor* tensor, int64_t *values, int value_len, int type); - -int at_defined(int *i, tensor); -int at_dim(int *i, tensor); -int at_shape(tensor, int *); -int at_scalar_type(int *i, tensor); - -int at_backward(tensor, int, int); -int at_requires_grad(int *i, tensor); -int at_grad_set_enabled(int); - -int at_get(tensor *, tensor, int index); -int at_fill_double(tensor, double); -int at_fill_int64(tensor, int64_t); - -int at_double_value_at_indexes(double *i, tensor, int *indexes, int indexes_len); -int at_int64_value_at_indexes(int64_t *i, tensor, int *indexes, int indexes_len); -int at_set_double_value_at_indexes(tensor, int *indexes, int indexes_len, double v); -int at_set_int64_value_at_indexes(tensor, int *indexes, int indexes_len, int64_t v); - -int at_copy_(tensor dst, tensor src); - -int at_print(tensor); +C_API int get_last_error(char *); +C_API int flush_error(); + +C_API int at_manual_seed(int64_t); +C_API int at_new_tensor(tensor *); +C_API int at_empty_cache(); +C_API int at_no_grad(int flag); +C_API int at_sync(); +C_API int at_from_blob(tensor *, void *data, int64_t *dims, int ndims, int64_t *strides, int nstrides, int dev); +C_API int at_tensor_of_data(tensor *, void *vs, int64_t *dims, int ndims, int element_size_in_bytes, int type); +C_API int at_copy_data(tensor tensor, void *vs, int64_t numel, int element_size_in_bytes); +C_API int at_float_vec(tensor* tensor, double *values, int value_len, int type); +C_API int at_int_vec(tensor* tensor, int64_t *values, int value_len, int type); + +C_API int at_defined(int *i, tensor); +C_API int at_dim(int *i, tensor); +C_API int at_shape(tensor, int *); +C_API int at_scalar_type(int *i, tensor); + +C_API int at_backward(tensor, int, int); +C_API int at_requires_grad(int *i, tensor); +C_API int at_grad_set_enabled(int); + +C_API int at_get(tensor *, tensor, int index); +C_API int at_fill_double(tensor, double); +C_API int at_fill_int64(tensor, int64_t); + +C_API int at_double_value_at_indexes(double *i, tensor, int *indexes, int indexes_len); +C_API int at_int64_value_at_indexes(int64_t *i, tensor, int *indexes, int indexes_len); +C_API int at_set_double_value_at_indexes(tensor, int *indexes, int indexes_len, double v); +C_API int at_set_int64_value_at_indexes(tensor, int *indexes, int indexes_len, int64_t v); + +C_API int at_copy_(tensor dst, tensor src); + +C_API int at_print(tensor); // char *at_to_string(tensor, int line_size); -int at_save(tensor, char *filename); -int at_load(char 
*filename, tensor *tensor); +C_API int at_save(tensor, char *filename); +C_API int at_load(char *filename, tensor *tensor); -int at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename); +C_API int at_save_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename); /* [at_load_multi] takes as input an array of nullptr for [tensors]. */ -int at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename); +C_API int at_load_multi(tensor *tensors, char **tensor_names, int ntensors, char *filename); /* [at_load_multi_] takes as input an array of allocation [tensors]. */ -int at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *filename); +C_API int at_load_multi_(tensor *tensors, char **tensor_names, int ntensors, char *filename); -int at_load_callback(char *filename, void (*f)(char *, tensor)); +C_API int at_load_callback(char *filename, void (*f)(char *, tensor)); -int at_free(tensor); +C_API int at_free(tensor); -int at_run_backward(tensor *tensors, - int ntensors, - tensor *inputs, - int ninputs, - tensor *outputs, - int keep_graph, - int create_graph); +C_API int at_run_backward(tensor *tensors, + int ntensors, + tensor *inputs, + int ninputs, + tensor *outputs, + int keep_graph, + int create_graph); -int ato_adam(optimizer *, double learning_rate, +C_API int ato_adam(optimizer *, double learning_rate, double beta1, double beta2, double weight_decay); -int ato_rmsprop(optimizer *, double learning_rate, +C_API int ato_rmsprop(optimizer *, double learning_rate, double alpha, double eps, double weight_decay, double momentum, int centered); -int ato_sgd(optimizer *, double learning_rate, +C_API int ato_sgd(optimizer *, double learning_rate, double momentum, double dampening, double weight_decay, int nesterov); -int ato_add_parameters(optimizer, tensor *, int ntensors); -int ato_set_learning_rate(optimizer, double learning_rate); -int ato_set_momentum(optimizer, double momentum); -int ato_zero_grad(optimizer); -int ato_step(optimizer); -int ato_free(optimizer); - -int ats_int(scalar *, int64_t); -int ats_float(scalar *, double); -int ats_free(scalar); - -int atc_cuda_device_count(int *); -int atc_cuda_is_available(int *); -int atc_cudnn_is_available(int *); -int atc_set_benchmark_cudnn(int b); - -int atm_load(char *, module *); -int atm_forward(tensor *, module, tensor *tensors, int ntensors); -int atm_forward_(ivalue *, module, - ivalue *ivalues, - int nivalues); -int atm_free(module); - -int ati_tensor(ivalue *, tensor); -int ati_int(ivalue *, int64_t); -int ati_double(ivalue *, double); -int ati_tuple(ivalue *, ivalue *, int); - -int ati_to_tensor(tensor *, ivalue); -int ati_to_int(int64_t *, ivalue); -int ati_to_double(double *, ivalue); -int ati_tuple_length(int *, ivalue); -int ati_to_tuple(ivalue, ivalue *, int); - -int ati_tag(int *, ivalue); - -int ati_free(ivalue); +C_API int ato_add_parameters(optimizer, tensor *, int ntensors); +C_API int ato_set_learning_rate(optimizer, double learning_rate); +C_API int ato_set_momentum(optimizer, double momentum); +C_API int ato_zero_grad(optimizer); +C_API int ato_step(optimizer); +C_API int ato_free(optimizer); + +C_API int ats_int(scalar *, int64_t); +C_API int ats_float(scalar *, double); +C_API int ats_free(scalar); + +C_API int atc_cuda_device_count(int *); +C_API int atc_cuda_is_available(int *); +C_API int atc_cudnn_is_available(int *); +C_API int atc_set_benchmark_cudnn(int b); + +C_API int atm_load(char *, module *); +C_API int atm_forward(tensor *, module, 
tensor *tensors, int ntensors); +C_API int atm_forward_(ivalue *, module, + ivalue *ivalues, + int nivalues); +C_API int atm_free(module); + +C_API int ati_tensor(ivalue *, tensor); +C_API int ati_int(ivalue *, int64_t); +C_API int ati_double(ivalue *, double); +C_API int ati_tuple(ivalue *, ivalue *, int); + +C_API int ati_to_tensor(tensor *, ivalue); +C_API int ati_to_int(int64_t *, ivalue); +C_API int ati_to_double(double *, ivalue); +C_API int ati_tuple_length(int *, ivalue); +C_API int ati_to_tuple(ivalue, ivalue *, int); + +C_API int ati_tag(int *, ivalue); + +C_API int ati_free(ivalue); #include "torch_api_generated.h" diff --git a/build/torch_api_generated.cpp.h b/build/torch_api_generated.cpp.h index 36a44c4b..dc03f9a3 100644 --- a/build/torch_api_generated.cpp.h +++ b/build/torch_api_generated.cpp.h @@ -1,6 +1,6 @@ // THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! -int atg_abs(tensor *out__, tensor self) { +C_API int atg_abs(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::abs(*self); out__[0] = new torch::Tensor(outputs__); @@ -9,7 +9,7 @@ int atg_abs(tensor *out__, tensor self) { return 1; } -int atg_abs_(tensor *out__, tensor self) { +C_API int atg_abs_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::abs_(*self); out__[0] = new torch::Tensor(outputs__); @@ -18,7 +18,7 @@ int atg_abs_(tensor *out__, tensor self) { return 1; } -int atg_abs_out(tensor *out__, tensor out, tensor self) { +C_API int atg_abs_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::abs_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -27,7 +27,7 @@ int atg_abs_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_acos(tensor *out__, tensor self) { +C_API int atg_acos(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::acos(*self); out__[0] = new torch::Tensor(outputs__); @@ -36,7 +36,7 @@ int atg_acos(tensor *out__, tensor self) { return 1; } -int atg_acos_(tensor *out__, tensor self) { +C_API int atg_acos_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::acos_(*self); out__[0] = new torch::Tensor(outputs__); @@ -45,7 +45,7 @@ int atg_acos_(tensor *out__, tensor self) { return 1; } -int atg_acos_out(tensor *out__, tensor out, tensor self) { +C_API int atg_acos_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::acos_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -54,7 +54,7 @@ int atg_acos_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_adaptive_avg_pool1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_adaptive_avg_pool1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::adaptive_avg_pool1d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -63,7 +63,7 @@ int atg_adaptive_avg_pool1d(tensor *out__, tensor self, int64_t *output_size_dat return 1; } -int atg_adaptive_avg_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_adaptive_avg_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::adaptive_avg_pool2d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -72,7 +72,7 @@ int atg_adaptive_avg_pool2d(tensor *out__, tensor self, int64_t *output_size_dat return 1; } -int atg_adaptive_avg_pool2d_out(tensor 
*out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_adaptive_avg_pool2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::adaptive_avg_pool2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -81,7 +81,7 @@ int atg_adaptive_avg_pool2d_out(tensor *out__, tensor out, tensor self, int64_t return 1; } -int atg_adaptive_avg_pool3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_adaptive_avg_pool3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::adaptive_avg_pool3d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -90,7 +90,7 @@ int atg_adaptive_avg_pool3d(tensor *out__, tensor self, int64_t *output_size_dat return 1; } -int atg_adaptive_avg_pool3d_backward(tensor *out__, tensor grad_output, tensor self) { +C_API int atg_adaptive_avg_pool3d_backward(tensor *out__, tensor grad_output, tensor self) { PROTECT( auto outputs__ = torch::adaptive_avg_pool3d_backward(*grad_output, *self); out__[0] = new torch::Tensor(outputs__); @@ -99,7 +99,7 @@ int atg_adaptive_avg_pool3d_backward(tensor *out__, tensor grad_output, tensor s return 1; } -int atg_adaptive_avg_pool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self) { +C_API int atg_adaptive_avg_pool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self) { PROTECT( auto outputs__ = torch::adaptive_avg_pool3d_backward_out(*grad_input, *grad_output, *self); out__[0] = new torch::Tensor(outputs__); @@ -108,7 +108,7 @@ int atg_adaptive_avg_pool3d_backward_out(tensor *out__, tensor grad_input, tenso return 1; } -int atg_adaptive_avg_pool3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_adaptive_avg_pool3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::adaptive_avg_pool3d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -117,7 +117,7 @@ int atg_adaptive_avg_pool3d_out(tensor *out__, tensor out, tensor self, int64_t return 1; } -int atg_adaptive_max_pool1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_adaptive_max_pool1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::adaptive_max_pool1d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -127,7 +127,7 @@ int atg_adaptive_max_pool1d(tensor *out__, tensor self, int64_t *output_size_dat return 1; } -int atg_adaptive_max_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_adaptive_max_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::adaptive_max_pool2d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -137,7 +137,7 @@ int atg_adaptive_max_pool2d(tensor *out__, tensor self, int64_t *output_size_dat return 1; } -int atg_adaptive_max_pool2d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices) { +C_API int 
atg_adaptive_max_pool2d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices) { PROTECT( auto outputs__ = torch::adaptive_max_pool2d_backward(*grad_output, *self, *indices); out__[0] = new torch::Tensor(outputs__); @@ -146,7 +146,7 @@ int atg_adaptive_max_pool2d_backward(tensor *out__, tensor grad_output, tensor s return 1; } -int atg_adaptive_max_pool2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices) { +C_API int atg_adaptive_max_pool2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices) { PROTECT( auto outputs__ = torch::adaptive_max_pool2d_backward_out(*grad_input, *grad_output, *self, *indices); out__[0] = new torch::Tensor(outputs__); @@ -155,7 +155,7 @@ int atg_adaptive_max_pool2d_backward_out(tensor *out__, tensor grad_input, tenso return 1; } -int atg_adaptive_max_pool2d_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_adaptive_max_pool2d_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::adaptive_max_pool2d_out(*out, *indices, *self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -165,7 +165,7 @@ int atg_adaptive_max_pool2d_out(tensor *out__, tensor out, tensor indices, tenso return 1; } -int atg_adaptive_max_pool3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_adaptive_max_pool3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::adaptive_max_pool3d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -175,7 +175,7 @@ int atg_adaptive_max_pool3d(tensor *out__, tensor self, int64_t *output_size_dat return 1; } -int atg_adaptive_max_pool3d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices) { +C_API int atg_adaptive_max_pool3d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices) { PROTECT( auto outputs__ = torch::adaptive_max_pool3d_backward(*grad_output, *self, *indices); out__[0] = new torch::Tensor(outputs__); @@ -184,7 +184,7 @@ int atg_adaptive_max_pool3d_backward(tensor *out__, tensor grad_output, tensor s return 1; } -int atg_adaptive_max_pool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices) { +C_API int atg_adaptive_max_pool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices) { PROTECT( auto outputs__ = torch::adaptive_max_pool3d_backward_out(*grad_input, *grad_output, *self, *indices); out__[0] = new torch::Tensor(outputs__); @@ -193,7 +193,7 @@ int atg_adaptive_max_pool3d_backward_out(tensor *out__, tensor grad_input, tenso return 1; } -int atg_adaptive_max_pool3d_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_adaptive_max_pool3d_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::adaptive_max_pool3d_out(*out, *indices, *self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -203,7 +203,7 @@ int atg_adaptive_max_pool3d_out(tensor *out__, tensor out, tensor indices, tenso return 1; } -int 
atg_add(tensor *out__, tensor self, tensor other) { +C_API int atg_add(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::add(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -212,7 +212,7 @@ int atg_add(tensor *out__, tensor self, tensor other) { return 1; } -int atg_add1(tensor *out__, tensor self, scalar other) { +C_API int atg_add1(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::add(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -221,7 +221,7 @@ int atg_add1(tensor *out__, tensor self, scalar other) { return 1; } -int atg_add_(tensor *out__, tensor self, tensor other) { +C_API int atg_add_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->add_(*other); out__[0] = new torch::Tensor(outputs__); @@ -230,7 +230,7 @@ int atg_add_(tensor *out__, tensor self, tensor other) { return 1; } -int atg_add_1(tensor *out__, tensor self, scalar other) { +C_API int atg_add_1(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->add_(*other); out__[0] = new torch::Tensor(outputs__); @@ -239,7 +239,7 @@ int atg_add_1(tensor *out__, tensor self, scalar other) { return 1; } -int atg_add_out(tensor *out__, tensor out, tensor self, tensor other) { +C_API int atg_add_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::add_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); @@ -248,7 +248,7 @@ int atg_add_out(tensor *out__, tensor out, tensor self, tensor other) { return 1; } -int atg_addbmm(tensor *out__, tensor self, tensor batch1, tensor batch2) { +C_API int atg_addbmm(tensor *out__, tensor self, tensor batch1, tensor batch2) { PROTECT( auto outputs__ = torch::addbmm(*self, *batch1, *batch2); out__[0] = new torch::Tensor(outputs__); @@ -257,7 +257,7 @@ int atg_addbmm(tensor *out__, tensor self, tensor batch1, tensor batch2) { return 1; } -int atg_addbmm_(tensor *out__, tensor self, tensor batch1, tensor batch2) { +C_API int atg_addbmm_(tensor *out__, tensor self, tensor batch1, tensor batch2) { PROTECT( auto outputs__ = self->addbmm_(*batch1, *batch2); out__[0] = new torch::Tensor(outputs__); @@ -266,7 +266,7 @@ int atg_addbmm_(tensor *out__, tensor self, tensor batch1, tensor batch2) { return 1; } -int atg_addbmm_out(tensor *out__, tensor out, tensor self, tensor batch1, tensor batch2) { +C_API int atg_addbmm_out(tensor *out__, tensor out, tensor self, tensor batch1, tensor batch2) { PROTECT( auto outputs__ = torch::addbmm_out(*out, *self, *batch1, *batch2); out__[0] = new torch::Tensor(outputs__); @@ -275,7 +275,7 @@ int atg_addbmm_out(tensor *out__, tensor out, tensor self, tensor batch1, tensor return 1; } -int atg_addcdiv(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { +C_API int atg_addcdiv(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { PROTECT( auto outputs__ = torch::addcdiv(*self, *tensor1, *tensor2); out__[0] = new torch::Tensor(outputs__); @@ -284,7 +284,7 @@ int atg_addcdiv(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { return 1; } -int atg_addcdiv_(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { +C_API int atg_addcdiv_(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { PROTECT( auto outputs__ = self->addcdiv_(*tensor1, *tensor2); out__[0] = new torch::Tensor(outputs__); @@ -293,7 +293,7 @@ int atg_addcdiv_(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { return 1; } -int atg_addcdiv_out(tensor *out__, tensor out, tensor self, tensor 
tensor1, tensor tensor2) { +C_API int atg_addcdiv_out(tensor *out__, tensor out, tensor self, tensor tensor1, tensor tensor2) { PROTECT( auto outputs__ = torch::addcdiv_out(*out, *self, *tensor1, *tensor2); out__[0] = new torch::Tensor(outputs__); @@ -302,7 +302,7 @@ int atg_addcdiv_out(tensor *out__, tensor out, tensor self, tensor tensor1, tens return 1; } -int atg_addcmul(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { +C_API int atg_addcmul(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { PROTECT( auto outputs__ = torch::addcmul(*self, *tensor1, *tensor2); out__[0] = new torch::Tensor(outputs__); @@ -311,7 +311,7 @@ int atg_addcmul(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { return 1; } -int atg_addcmul_(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { +C_API int atg_addcmul_(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { PROTECT( auto outputs__ = self->addcmul_(*tensor1, *tensor2); out__[0] = new torch::Tensor(outputs__); @@ -320,7 +320,7 @@ int atg_addcmul_(tensor *out__, tensor self, tensor tensor1, tensor tensor2) { return 1; } -int atg_addcmul_out(tensor *out__, tensor out, tensor self, tensor tensor1, tensor tensor2) { +C_API int atg_addcmul_out(tensor *out__, tensor out, tensor self, tensor tensor1, tensor tensor2) { PROTECT( auto outputs__ = torch::addcmul_out(*out, *self, *tensor1, *tensor2); out__[0] = new torch::Tensor(outputs__); @@ -329,7 +329,7 @@ int atg_addcmul_out(tensor *out__, tensor out, tensor self, tensor tensor1, tens return 1; } -int atg_addmm(tensor *out__, tensor self, tensor mat1, tensor mat2) { +C_API int atg_addmm(tensor *out__, tensor self, tensor mat1, tensor mat2) { PROTECT( auto outputs__ = torch::addmm(*self, *mat1, *mat2); out__[0] = new torch::Tensor(outputs__); @@ -338,7 +338,7 @@ int atg_addmm(tensor *out__, tensor self, tensor mat1, tensor mat2) { return 1; } -int atg_addmm_(tensor *out__, tensor self, tensor mat1, tensor mat2) { +C_API int atg_addmm_(tensor *out__, tensor self, tensor mat1, tensor mat2) { PROTECT( auto outputs__ = self->addmm_(*mat1, *mat2); out__[0] = new torch::Tensor(outputs__); @@ -347,7 +347,7 @@ int atg_addmm_(tensor *out__, tensor self, tensor mat1, tensor mat2) { return 1; } -int atg_addmm_out(tensor *out__, tensor out, tensor self, tensor mat1, tensor mat2) { +C_API int atg_addmm_out(tensor *out__, tensor out, tensor self, tensor mat1, tensor mat2) { PROTECT( auto outputs__ = torch::addmm_out(*out, *self, *mat1, *mat2); out__[0] = new torch::Tensor(outputs__); @@ -356,7 +356,7 @@ int atg_addmm_out(tensor *out__, tensor out, tensor self, tensor mat1, tensor ma return 1; } -int atg_addmv(tensor *out__, tensor self, tensor mat, tensor vec) { +C_API int atg_addmv(tensor *out__, tensor self, tensor mat, tensor vec) { PROTECT( auto outputs__ = torch::addmv(*self, *mat, *vec); out__[0] = new torch::Tensor(outputs__); @@ -365,7 +365,7 @@ int atg_addmv(tensor *out__, tensor self, tensor mat, tensor vec) { return 1; } -int atg_addmv_(tensor *out__, tensor self, tensor mat, tensor vec) { +C_API int atg_addmv_(tensor *out__, tensor self, tensor mat, tensor vec) { PROTECT( auto outputs__ = torch::addmv_(*self, *mat, *vec); out__[0] = new torch::Tensor(outputs__); @@ -374,7 +374,7 @@ int atg_addmv_(tensor *out__, tensor self, tensor mat, tensor vec) { return 1; } -int atg_addmv_out(tensor *out__, tensor out, tensor self, tensor mat, tensor vec) { +C_API int atg_addmv_out(tensor *out__, tensor out, tensor self, tensor mat, tensor vec) { PROTECT( auto outputs__ = 
torch::addmv_out(*out, *self, *mat, *vec); out__[0] = new torch::Tensor(outputs__); @@ -383,7 +383,7 @@ int atg_addmv_out(tensor *out__, tensor out, tensor self, tensor mat, tensor vec return 1; } -int atg_addr(tensor *out__, tensor self, tensor vec1, tensor vec2) { +C_API int atg_addr(tensor *out__, tensor self, tensor vec1, tensor vec2) { PROTECT( auto outputs__ = torch::addr(*self, *vec1, *vec2); out__[0] = new torch::Tensor(outputs__); @@ -392,7 +392,7 @@ int atg_addr(tensor *out__, tensor self, tensor vec1, tensor vec2) { return 1; } -int atg_addr_(tensor *out__, tensor self, tensor vec1, tensor vec2) { +C_API int atg_addr_(tensor *out__, tensor self, tensor vec1, tensor vec2) { PROTECT( auto outputs__ = self->addr_(*vec1, *vec2); out__[0] = new torch::Tensor(outputs__); @@ -401,7 +401,7 @@ int atg_addr_(tensor *out__, tensor self, tensor vec1, tensor vec2) { return 1; } -int atg_addr_out(tensor *out__, tensor out, tensor self, tensor vec1, tensor vec2) { +C_API int atg_addr_out(tensor *out__, tensor out, tensor self, tensor vec1, tensor vec2) { PROTECT( auto outputs__ = torch::addr_out(*out, *self, *vec1, *vec2); out__[0] = new torch::Tensor(outputs__); @@ -410,7 +410,7 @@ int atg_addr_out(tensor *out__, tensor out, tensor self, tensor vec1, tensor vec return 1; } -int atg_affine_grid_generator(tensor *out__, tensor theta, int64_t *size_data, int size_len, int align_corners) { +C_API int atg_affine_grid_generator(tensor *out__, tensor theta, int64_t *size_data, int size_len, int align_corners) { PROTECT( auto outputs__ = torch::affine_grid_generator(*theta, torch::IntArrayRef(size_data, size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -419,7 +419,7 @@ int atg_affine_grid_generator(tensor *out__, tensor theta, int64_t *size_data, i return 1; } -int atg_affine_grid_generator_backward(tensor *out__, tensor grad, int64_t *size_data, int size_len, int align_corners) { +C_API int atg_affine_grid_generator_backward(tensor *out__, tensor grad, int64_t *size_data, int size_len, int align_corners) { PROTECT( auto outputs__ = torch::affine_grid_generator_backward(*grad, torch::IntArrayRef(size_data, size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -428,7 +428,7 @@ int atg_affine_grid_generator_backward(tensor *out__, tensor grad, int64_t *size return 1; } -int atg_alias(tensor *out__, tensor self) { +C_API int atg_alias(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::alias(*self); out__[0] = new torch::Tensor(outputs__); @@ -437,7 +437,7 @@ int atg_alias(tensor *out__, tensor self) { return 1; } -int atg_align_as(tensor *out__, tensor self, tensor other) { +C_API int atg_align_as(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->align_as(*other); out__[0] = new torch::Tensor(outputs__); @@ -446,7 +446,7 @@ int atg_align_as(tensor *out__, tensor self, tensor other) { return 1; } -int atg_align_tensors(tensor *out__, tensor *tensors_data, int tensors_len) { +C_API int atg_align_tensors(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::align_tensors(of_carray_tensor(tensors_data, tensors_len)); int sz = outputs__.size(); @@ -460,7 +460,7 @@ int atg_align_tensors(tensor *out__, tensor *tensors_data, int tensors_len) { return 1; } -int atg_all(tensor *out__, tensor self) { +C_API int atg_all(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::all(*self); out__[0] = new torch::Tensor(outputs__); @@ -469,7 +469,7 @@ int atg_all(tensor *out__, tensor self) { 
return 1; } -int atg_all1(tensor *out__, tensor self, int64_t dim, int keepdim) { +C_API int atg_all1(tensor *out__, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::all(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); @@ -478,7 +478,7 @@ int atg_all1(tensor *out__, tensor self, int64_t dim, int keepdim) { return 1; } -int atg_all_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim) { +C_API int atg_all_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::all_out(*out, *self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); @@ -487,7 +487,7 @@ int atg_all_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim return 1; } -int atg_alpha_dropout(tensor *out__, tensor input, double p, int train) { +C_API int atg_alpha_dropout(tensor *out__, tensor input, double p, int train) { PROTECT( auto outputs__ = torch::alpha_dropout(*input, p, (bool)train); out__[0] = new torch::Tensor(outputs__); @@ -496,7 +496,7 @@ int atg_alpha_dropout(tensor *out__, tensor input, double p, int train) { return 1; } -int atg_alpha_dropout_(tensor *out__, tensor self, double p, int train) { +C_API int atg_alpha_dropout_(tensor *out__, tensor self, double p, int train) { PROTECT( auto outputs__ = torch::alpha_dropout_(*self, p, (bool)train); out__[0] = new torch::Tensor(outputs__); @@ -505,7 +505,7 @@ int atg_alpha_dropout_(tensor *out__, tensor self, double p, int train) { return 1; } -int atg_angle(tensor *out__, tensor self) { +C_API int atg_angle(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::angle(*self); out__[0] = new torch::Tensor(outputs__); @@ -514,7 +514,7 @@ int atg_angle(tensor *out__, tensor self) { return 1; } -int atg_angle_out(tensor *out__, tensor out, tensor self) { +C_API int atg_angle_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::angle_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -523,7 +523,7 @@ int atg_angle_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_any(tensor *out__, tensor self) { +C_API int atg_any(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::any(*self); out__[0] = new torch::Tensor(outputs__); @@ -532,7 +532,7 @@ int atg_any(tensor *out__, tensor self) { return 1; } -int atg_any1(tensor *out__, tensor self, int64_t dim, int keepdim) { +C_API int atg_any1(tensor *out__, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::any(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); @@ -541,7 +541,7 @@ int atg_any1(tensor *out__, tensor self, int64_t dim, int keepdim) { return 1; } -int atg_any_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim) { +C_API int atg_any_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::any_out(*out, *self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); @@ -550,7 +550,7 @@ int atg_any_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim return 1; } -int atg_arange(tensor *out__, scalar end, int options_kind, int options_device) { +C_API int atg_arange(tensor *out__, scalar end, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::arange(*end, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -559,7 +559,7 @@ int atg_arange(tensor *out__, scalar end, int options_kind, int options_device) return 
1; } -int atg_arange1(tensor *out__, scalar start, scalar end, int options_kind, int options_device) { +C_API int atg_arange1(tensor *out__, scalar start, scalar end, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::arange(*start, *end, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -568,7 +568,7 @@ int atg_arange1(tensor *out__, scalar start, scalar end, int options_kind, int o return 1; } -int atg_arange2(tensor *out__, scalar start, scalar end, scalar step, int options_kind, int options_device) { +C_API int atg_arange2(tensor *out__, scalar start, scalar end, scalar step, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::arange(*start, *end, *step, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -577,7 +577,7 @@ int atg_arange2(tensor *out__, scalar start, scalar end, scalar step, int option return 1; } -int atg_arange_out(tensor *out__, tensor out, scalar end) { +C_API int atg_arange_out(tensor *out__, tensor out, scalar end) { PROTECT( auto outputs__ = torch::arange_out(*out, *end); out__[0] = new torch::Tensor(outputs__); @@ -586,7 +586,7 @@ int atg_arange_out(tensor *out__, tensor out, scalar end) { return 1; } -int atg_arange_out1(tensor *out__, tensor out, scalar start, scalar end) { +C_API int atg_arange_out1(tensor *out__, tensor out, scalar start, scalar end) { PROTECT( auto outputs__ = torch::arange_out(*out, *start, *end); out__[0] = new torch::Tensor(outputs__); @@ -595,7 +595,7 @@ int atg_arange_out1(tensor *out__, tensor out, scalar start, scalar end) { return 1; } -int atg_argmax(tensor *out__, tensor self, int64_t dim, int keepdim) { +C_API int atg_argmax(tensor *out__, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::argmax(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); @@ -604,7 +604,7 @@ int atg_argmax(tensor *out__, tensor self, int64_t dim, int keepdim) { return 1; } -int atg_argmin(tensor *out__, tensor self, int64_t dim, int keepdim) { +C_API int atg_argmin(tensor *out__, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::argmin(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); @@ -613,7 +613,7 @@ int atg_argmin(tensor *out__, tensor self, int64_t dim, int keepdim) { return 1; } -int atg_argsort(tensor *out__, tensor self, int64_t dim, int descending) { +C_API int atg_argsort(tensor *out__, tensor self, int64_t dim, int descending) { PROTECT( auto outputs__ = torch::argsort(*self, dim, (bool)descending); out__[0] = new torch::Tensor(outputs__); @@ -622,7 +622,7 @@ int atg_argsort(tensor *out__, tensor self, int64_t dim, int descending) { return 1; } -int atg_as_strided(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset) { +C_API int atg_as_strided(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset) { PROTECT( auto outputs__ = torch::as_strided(*self, torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), storage_offset); out__[0] = new torch::Tensor(outputs__); @@ -631,7 +631,7 @@ int atg_as_strided(tensor *out__, tensor self, int64_t *size_data, int size_len, return 1; } -int atg_as_strided_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t 
storage_offset) { +C_API int atg_as_strided_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset) { PROTECT( auto outputs__ = torch::as_strided_(*self, torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), storage_offset); out__[0] = new torch::Tensor(outputs__); @@ -640,7 +640,7 @@ int atg_as_strided_(tensor *out__, tensor self, int64_t *size_data, int size_len return 1; } -int atg_asin(tensor *out__, tensor self) { +C_API int atg_asin(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::asin(*self); out__[0] = new torch::Tensor(outputs__); @@ -649,7 +649,7 @@ int atg_asin(tensor *out__, tensor self) { return 1; } -int atg_asin_(tensor *out__, tensor self) { +C_API int atg_asin_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::asin_(*self); out__[0] = new torch::Tensor(outputs__); @@ -658,7 +658,7 @@ int atg_asin_(tensor *out__, tensor self) { return 1; } -int atg_asin_out(tensor *out__, tensor out, tensor self) { +C_API int atg_asin_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::asin_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -667,7 +667,7 @@ int atg_asin_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_atan(tensor *out__, tensor self) { +C_API int atg_atan(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::atan(*self); out__[0] = new torch::Tensor(outputs__); @@ -676,7 +676,7 @@ int atg_atan(tensor *out__, tensor self) { return 1; } -int atg_atan2(tensor *out__, tensor self, tensor other) { +C_API int atg_atan2(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::atan2(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -685,7 +685,7 @@ int atg_atan2(tensor *out__, tensor self, tensor other) { return 1; } -int atg_atan2_(tensor *out__, tensor self, tensor other) { +C_API int atg_atan2_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->atan2_(*other); out__[0] = new torch::Tensor(outputs__); @@ -694,7 +694,7 @@ int atg_atan2_(tensor *out__, tensor self, tensor other) { return 1; } -int atg_atan2_out(tensor *out__, tensor out, tensor self, tensor other) { +C_API int atg_atan2_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::atan2_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); @@ -703,7 +703,7 @@ int atg_atan2_out(tensor *out__, tensor out, tensor self, tensor other) { return 1; } -int atg_atan_(tensor *out__, tensor self) { +C_API int atg_atan_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::atan_(*self); out__[0] = new torch::Tensor(outputs__); @@ -712,7 +712,7 @@ int atg_atan_(tensor *out__, tensor self) { return 1; } -int atg_atan_out(tensor *out__, tensor out, tensor self) { +C_API int atg_atan_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::atan_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -721,7 +721,7 @@ int atg_atan_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_avg_pool1d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad) { +C_API int atg_avg_pool1d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int 
count_include_pad) { PROTECT( auto outputs__ = torch::avg_pool1d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad); out__[0] = new torch::Tensor(outputs__); @@ -730,7 +730,7 @@ int atg_avg_pool1d(tensor *out__, tensor self, int64_t *kernel_size_data, int ke return 1; } -int atg_avg_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +C_API int atg_avg_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { PROTECT( auto outputs__ = torch::avg_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); out__[0] = new torch::Tensor(outputs__); @@ -739,7 +739,7 @@ int atg_avg_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int ke return 1; } -int atg_avg_pool2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +C_API int atg_avg_pool2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { PROTECT( auto outputs__ = torch::avg_pool2d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); out__[0] = new torch::Tensor(outputs__); @@ -748,7 +748,7 @@ int atg_avg_pool2d_backward(tensor *out__, tensor grad_output, tensor self, int6 return 1; } -int atg_avg_pool2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +C_API int atg_avg_pool2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { PROTECT( auto outputs__ = torch::avg_pool2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); out__[0] = new torch::Tensor(outputs__); @@ -757,7 +757,7 @@ int atg_avg_pool2d_backward_out(tensor *out__, tensor grad_input, tensor grad_ou return 1; } -int atg_avg_pool2d_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int 
count_include_pad, int64_t divisor_override) { +C_API int atg_avg_pool2d_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { PROTECT( auto outputs__ = torch::avg_pool2d_out(*out, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); out__[0] = new torch::Tensor(outputs__); @@ -766,7 +766,7 @@ int atg_avg_pool2d_out(tensor *out__, tensor out, tensor self, int64_t *kernel_s return 1; } -int atg_avg_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +C_API int atg_avg_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { PROTECT( auto outputs__ = torch::avg_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); out__[0] = new torch::Tensor(outputs__); @@ -775,7 +775,7 @@ int atg_avg_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int ke return 1; } -int atg_avg_pool3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +C_API int atg_avg_pool3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { PROTECT( auto outputs__ = torch::avg_pool3d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); out__[0] = new torch::Tensor(outputs__); @@ -784,7 +784,7 @@ int atg_avg_pool3d_backward(tensor *out__, tensor grad_output, tensor self, int6 return 1; } -int atg_avg_pool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +C_API int atg_avg_pool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { PROTECT( auto outputs__ = torch::avg_pool3d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); out__[0] = new torch::Tensor(outputs__); @@ -793,7 
+793,7 @@ int atg_avg_pool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_ou return 1; } -int atg_avg_pool3d_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { +C_API int atg_avg_pool3d_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override) { PROTECT( auto outputs__ = torch::avg_pool3d_out(*out, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), (bool)ceil_mode, (bool)count_include_pad, divisor_override); out__[0] = new torch::Tensor(outputs__); @@ -802,7 +802,7 @@ int atg_avg_pool3d_out(tensor *out__, tensor out, tensor self, int64_t *kernel_s return 1; } -int atg_baddbmm(tensor *out__, tensor self, tensor batch1, tensor batch2) { +C_API int atg_baddbmm(tensor *out__, tensor self, tensor batch1, tensor batch2) { PROTECT( auto outputs__ = torch::baddbmm(*self, *batch1, *batch2); out__[0] = new torch::Tensor(outputs__); @@ -811,7 +811,7 @@ int atg_baddbmm(tensor *out__, tensor self, tensor batch1, tensor batch2) { return 1; } -int atg_baddbmm_(tensor *out__, tensor self, tensor batch1, tensor batch2) { +C_API int atg_baddbmm_(tensor *out__, tensor self, tensor batch1, tensor batch2) { PROTECT( auto outputs__ = self->baddbmm_(*batch1, *batch2); out__[0] = new torch::Tensor(outputs__); @@ -820,7 +820,7 @@ int atg_baddbmm_(tensor *out__, tensor self, tensor batch1, tensor batch2) { return 1; } -int atg_baddbmm_out(tensor *out__, tensor out, tensor self, tensor batch1, tensor batch2) { +C_API int atg_baddbmm_out(tensor *out__, tensor out, tensor self, tensor batch1, tensor batch2) { PROTECT( auto outputs__ = torch::baddbmm_out(*out, *self, *batch1, *batch2); out__[0] = new torch::Tensor(outputs__); @@ -829,7 +829,7 @@ int atg_baddbmm_out(tensor *out__, tensor out, tensor self, tensor batch1, tenso return 1; } -int atg_bartlett_window(tensor *out__, int64_t window_length, int options_kind, int options_device) { +C_API int atg_bartlett_window(tensor *out__, int64_t window_length, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::bartlett_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -838,7 +838,7 @@ int atg_bartlett_window(tensor *out__, int64_t window_length, int options_kind, return 1; } -int atg_bartlett_window1(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) { +C_API int atg_bartlett_window1(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::bartlett_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -847,7 +847,7 @@ int atg_bartlett_window1(tensor *out__, int64_t window_length, int periodic, int return 1; } -int atg_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps, int cudnn_enabled) { +C_API int atg_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor 
running_mean, tensor running_var, int training, double momentum, double eps, int cudnn_enabled) { PROTECT( auto outputs__ = torch::batch_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)training, momentum, eps, (bool)cudnn_enabled); out__[0] = new torch::Tensor(outputs__); @@ -856,7 +856,7 @@ int atg_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tens return 1; } -int atg_batch_norm_backward_elemt(tensor *out__, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, tensor mean_dy, tensor mean_dy_xmu) { +C_API int atg_batch_norm_backward_elemt(tensor *out__, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, tensor mean_dy, tensor mean_dy_xmu) { PROTECT( auto outputs__ = torch::batch_norm_backward_elemt(*grad_out, *input, *mean, *invstd, (weight ? *weight : torch::Tensor()), *mean_dy, *mean_dy_xmu); out__[0] = new torch::Tensor(outputs__); @@ -865,7 +865,7 @@ int atg_batch_norm_backward_elemt(tensor *out__, tensor grad_out, tensor input, return 1; } -int atg_batch_norm_backward_reduce(tensor *out__, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, int input_g, int weight_g, int bias_g) { +C_API int atg_batch_norm_backward_reduce(tensor *out__, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, int input_g, int weight_g, int bias_g) { PROTECT( auto outputs__ = torch::batch_norm_backward_reduce(*grad_out, *input, *mean, *invstd, (weight ? *weight : torch::Tensor()), (bool)input_g, (bool)weight_g, (bool)bias_g); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -877,7 +877,7 @@ int atg_batch_norm_backward_reduce(tensor *out__, tensor grad_out, tensor input, return 1; } -int atg_batch_norm_elemt(tensor *out__, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps) { +C_API int atg_batch_norm_elemt(tensor *out__, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps) { PROTECT( auto outputs__ = torch::batch_norm_elemt(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), *mean, *invstd, eps); out__[0] = new torch::Tensor(outputs__); @@ -886,7 +886,7 @@ int atg_batch_norm_elemt(tensor *out__, tensor input, tensor weight, tensor bias return 1; } -int atg_batch_norm_elemt_out(tensor *out__, tensor out, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps) { +C_API int atg_batch_norm_elemt_out(tensor *out__, tensor out, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps) { PROTECT( auto outputs__ = torch::batch_norm_elemt_out(*out, *input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), *mean, *invstd, eps); out__[0] = new torch::Tensor(outputs__); @@ -895,7 +895,7 @@ int atg_batch_norm_elemt_out(tensor *out__, tensor out, tensor input, tensor wei return 1; } -int atg_batch_norm_gather_stats(tensor *out__, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t count) { +C_API int atg_batch_norm_gather_stats(tensor *out__, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t count) { PROTECT( auto outputs__ = torch::batch_norm_gather_stats(*input, *mean, *invstd, (running_mean ? *running_mean : torch::Tensor()), (running_var ? 
*running_var : torch::Tensor()), momentum, eps, count); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -905,7 +905,7 @@ int atg_batch_norm_gather_stats(tensor *out__, tensor input, tensor mean, tensor return 1; } -int atg_batch_norm_gather_stats_with_counts(tensor *out__, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t *counts_data, int counts_len) { +C_API int atg_batch_norm_gather_stats_with_counts(tensor *out__, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t *counts_data, int counts_len) { PROTECT( auto outputs__ = torch::batch_norm_gather_stats_with_counts(*input, *mean, *invstd, (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), momentum, eps, torch::IntArrayRef(counts_data, counts_len)); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -915,7 +915,7 @@ int atg_batch_norm_gather_stats_with_counts(tensor *out__, tensor input, tensor return 1; } -int atg_batch_norm_stats(tensor *out__, tensor input, double eps) { +C_API int atg_batch_norm_stats(tensor *out__, tensor input, double eps) { PROTECT( auto outputs__ = torch::batch_norm_stats(*input, eps); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -925,7 +925,7 @@ int atg_batch_norm_stats(tensor *out__, tensor input, double eps) { return 1; } -int atg_batch_norm_update_stats(tensor *out__, tensor input, tensor running_mean, tensor running_var, double momentum) { +C_API int atg_batch_norm_update_stats(tensor *out__, tensor input, tensor running_mean, tensor running_var, double momentum) { PROTECT( auto outputs__ = torch::batch_norm_update_stats(*input, (running_mean ? *running_mean : torch::Tensor()), (running_var ? 
*running_var : torch::Tensor()), momentum); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -935,7 +935,7 @@ int atg_batch_norm_update_stats(tensor *out__, tensor input, tensor running_mean return 1; } -int atg_bernoulli(tensor *out__, tensor self) { +C_API int atg_bernoulli(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::bernoulli(*self); out__[0] = new torch::Tensor(outputs__); @@ -944,7 +944,7 @@ int atg_bernoulli(tensor *out__, tensor self) { return 1; } -int atg_bernoulli1(tensor *out__, tensor self, double p) { +C_API int atg_bernoulli1(tensor *out__, tensor self, double p) { PROTECT( auto outputs__ = torch::bernoulli(*self, p); out__[0] = new torch::Tensor(outputs__); @@ -953,7 +953,7 @@ int atg_bernoulli1(tensor *out__, tensor self, double p) { return 1; } -int atg_bernoulli_(tensor *out__, tensor self, tensor p) { +C_API int atg_bernoulli_(tensor *out__, tensor self, tensor p) { PROTECT( auto outputs__ = self->bernoulli_(*p); out__[0] = new torch::Tensor(outputs__); @@ -962,7 +962,7 @@ int atg_bernoulli_(tensor *out__, tensor self, tensor p) { return 1; } -int atg_bernoulli_1(tensor *out__, tensor self, double p) { +C_API int atg_bernoulli_1(tensor *out__, tensor self, double p) { PROTECT( auto outputs__ = self->bernoulli_(p); out__[0] = new torch::Tensor(outputs__); @@ -971,7 +971,7 @@ int atg_bernoulli_1(tensor *out__, tensor self, double p) { return 1; } -int atg_bernoulli_out(tensor *out__, tensor out, tensor self) { +C_API int atg_bernoulli_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::bernoulli_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -980,7 +980,7 @@ int atg_bernoulli_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_bilinear(tensor *out__, tensor input1, tensor input2, tensor weight, tensor bias) { +C_API int atg_bilinear(tensor *out__, tensor input1, tensor input2, tensor weight, tensor bias) { PROTECT( auto outputs__ = torch::bilinear(*input1, *input2, *weight, (bias ? *bias : torch::Tensor())); out__[0] = new torch::Tensor(outputs__); @@ -989,7 +989,7 @@ int atg_bilinear(tensor *out__, tensor input1, tensor input2, tensor weight, ten return 1; } -int atg_binary_cross_entropy(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction) { +C_API int atg_binary_cross_entropy(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction) { PROTECT( auto outputs__ = torch::binary_cross_entropy(*self, *target, (weight ? *weight : torch::Tensor()), reduction); out__[0] = new torch::Tensor(outputs__); @@ -998,7 +998,7 @@ int atg_binary_cross_entropy(tensor *out__, tensor self, tensor target, tensor w return 1; } -int atg_binary_cross_entropy_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction) { +C_API int atg_binary_cross_entropy_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction) { PROTECT( auto outputs__ = torch::binary_cross_entropy_backward(*grad_output, *self, *target, (weight ? 
*weight : torch::Tensor()), reduction); out__[0] = new torch::Tensor(outputs__); @@ -1007,7 +1007,7 @@ int atg_binary_cross_entropy_backward(tensor *out__, tensor grad_output, tensor return 1; } -int atg_binary_cross_entropy_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction) { +C_API int atg_binary_cross_entropy_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction) { PROTECT( auto outputs__ = torch::binary_cross_entropy_backward_out(*grad_input, *grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction); out__[0] = new torch::Tensor(outputs__); @@ -1016,7 +1016,7 @@ int atg_binary_cross_entropy_backward_out(tensor *out__, tensor grad_input, tens return 1; } -int atg_binary_cross_entropy_out(tensor *out__, tensor out, tensor self, tensor target, tensor weight, int64_t reduction) { +C_API int atg_binary_cross_entropy_out(tensor *out__, tensor out, tensor self, tensor target, tensor weight, int64_t reduction) { PROTECT( auto outputs__ = torch::binary_cross_entropy_out(*out, *self, *target, (weight ? *weight : torch::Tensor()), reduction); out__[0] = new torch::Tensor(outputs__); @@ -1025,7 +1025,7 @@ int atg_binary_cross_entropy_out(tensor *out__, tensor out, tensor self, tensor return 1; } -int atg_binary_cross_entropy_with_logits(tensor *out__, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction) { +C_API int atg_binary_cross_entropy_with_logits(tensor *out__, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction) { PROTECT( auto outputs__ = torch::binary_cross_entropy_with_logits(*self, *target, (weight ? *weight : torch::Tensor()), (pos_weight ? *pos_weight : torch::Tensor()), reduction); out__[0] = new torch::Tensor(outputs__); @@ -1034,7 +1034,7 @@ int atg_binary_cross_entropy_with_logits(tensor *out__, tensor self, tensor targ return 1; } -int atg_binary_cross_entropy_with_logits_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction) { +C_API int atg_binary_cross_entropy_with_logits_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction) { PROTECT( auto outputs__ = torch::binary_cross_entropy_with_logits_backward(*grad_output, *self, *target, (weight ? *weight : torch::Tensor()), (pos_weight ? *pos_weight : torch::Tensor()), reduction); out__[0] = new torch::Tensor(outputs__); @@ -1043,7 +1043,7 @@ int atg_binary_cross_entropy_with_logits_backward(tensor *out__, tensor grad_out return 1; } -int atg_bincount(tensor *out__, tensor self, tensor weights, int64_t minlength) { +C_API int atg_bincount(tensor *out__, tensor self, tensor weights, int64_t minlength) { PROTECT( auto outputs__ = torch::bincount(*self, (weights ? 
*weights : torch::Tensor()), minlength); out__[0] = new torch::Tensor(outputs__); @@ -1052,7 +1052,7 @@ int atg_bincount(tensor *out__, tensor self, tensor weights, int64_t minlength) return 1; } -int atg_bitwise_not(tensor *out__, tensor self) { +C_API int atg_bitwise_not(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::bitwise_not(*self); out__[0] = new torch::Tensor(outputs__); @@ -1061,7 +1061,7 @@ int atg_bitwise_not(tensor *out__, tensor self) { return 1; } -int atg_bitwise_not_(tensor *out__, tensor self) { +C_API int atg_bitwise_not_(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->bitwise_not_(); out__[0] = new torch::Tensor(outputs__); @@ -1070,7 +1070,7 @@ int atg_bitwise_not_(tensor *out__, tensor self) { return 1; } -int atg_bitwise_not_out(tensor *out__, tensor out, tensor self) { +C_API int atg_bitwise_not_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::bitwise_not_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -1079,7 +1079,7 @@ int atg_bitwise_not_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_bitwise_xor(tensor *out__, tensor self, scalar other) { +C_API int atg_bitwise_xor(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::bitwise_xor(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -1088,7 +1088,7 @@ int atg_bitwise_xor(tensor *out__, tensor self, scalar other) { return 1; } -int atg_bitwise_xor1(tensor *out__, tensor self, tensor other) { +C_API int atg_bitwise_xor1(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::bitwise_xor(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -1097,7 +1097,7 @@ int atg_bitwise_xor1(tensor *out__, tensor self, tensor other) { return 1; } -int atg_bitwise_xor_(tensor *out__, tensor self, scalar other) { +C_API int atg_bitwise_xor_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->bitwise_xor_(*other); out__[0] = new torch::Tensor(outputs__); @@ -1106,7 +1106,7 @@ int atg_bitwise_xor_(tensor *out__, tensor self, scalar other) { return 1; } -int atg_bitwise_xor_1(tensor *out__, tensor self, tensor other) { +C_API int atg_bitwise_xor_1(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->bitwise_xor_(*other); out__[0] = new torch::Tensor(outputs__); @@ -1115,7 +1115,7 @@ int atg_bitwise_xor_1(tensor *out__, tensor self, tensor other) { return 1; } -int atg_bitwise_xor_out(tensor *out__, tensor out, tensor self, tensor other) { +C_API int atg_bitwise_xor_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::bitwise_xor_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); @@ -1124,7 +1124,7 @@ int atg_bitwise_xor_out(tensor *out__, tensor out, tensor self, tensor other) { return 1; } -int atg_bitwise_xor_out1(tensor *out__, tensor out, tensor self, scalar other) { +C_API int atg_bitwise_xor_out1(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::bitwise_xor_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); @@ -1133,7 +1133,7 @@ int atg_bitwise_xor_out1(tensor *out__, tensor out, tensor self, scalar other) { return 1; } -int atg_blackman_window(tensor *out__, int64_t window_length, int options_kind, int options_device) { +C_API int atg_blackman_window(tensor *out__, int64_t window_length, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::blackman_window(window_length, 
at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -1142,7 +1142,7 @@ int atg_blackman_window(tensor *out__, int64_t window_length, int options_kind, return 1; } -int atg_blackman_window1(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) { +C_API int atg_blackman_window1(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::blackman_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -1151,7 +1151,7 @@ int atg_blackman_window1(tensor *out__, int64_t window_length, int periodic, int return 1; } -int atg_bmm(tensor *out__, tensor self, tensor mat2) { +C_API int atg_bmm(tensor *out__, tensor self, tensor mat2) { PROTECT( auto outputs__ = torch::bmm(*self, *mat2); out__[0] = new torch::Tensor(outputs__); @@ -1160,7 +1160,7 @@ int atg_bmm(tensor *out__, tensor self, tensor mat2) { return 1; } -int atg_bmm_out(tensor *out__, tensor out, tensor self, tensor mat2) { +C_API int atg_bmm_out(tensor *out__, tensor out, tensor self, tensor mat2) { PROTECT( auto outputs__ = torch::bmm_out(*out, *self, *mat2); out__[0] = new torch::Tensor(outputs__); @@ -1169,7 +1169,7 @@ int atg_bmm_out(tensor *out__, tensor out, tensor self, tensor mat2) { return 1; } -int atg_broadcast_tensors(tensor *out__, tensor *tensors_data, int tensors_len) { +C_API int atg_broadcast_tensors(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::broadcast_tensors(of_carray_tensor(tensors_data, tensors_len)); int sz = outputs__.size(); @@ -1193,7 +1193,7 @@ return 1; // ) // } -int atg_cartesian_prod(tensor *out__, tensor *tensors_data, int tensors_len) { +C_API int atg_cartesian_prod(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::cartesian_prod(of_carray_tensor(tensors_data, tensors_len)); out__[0] = new torch::Tensor(outputs__); @@ -1202,7 +1202,7 @@ int atg_cartesian_prod(tensor *out__, tensor *tensors_data, int tensors_len) { return 1; } -int atg_cat(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) { +C_API int atg_cat(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) { PROTECT( auto outputs__ = torch::cat(of_carray_tensor(tensors_data, tensors_len), dim); out__[0] = new torch::Tensor(outputs__); @@ -1211,7 +1211,7 @@ int atg_cat(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) { return 1; } -int atg_cat_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len, int64_t dim) { +C_API int atg_cat_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len, int64_t dim) { PROTECT( auto outputs__ = torch::cat_out(*out, of_carray_tensor(tensors_data, tensors_len), dim); out__[0] = new torch::Tensor(outputs__); @@ -1220,7 +1220,7 @@ int atg_cat_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len return 1; } -int atg_cauchy_(tensor *out__, tensor self, double median, double sigma) { +C_API int atg_cauchy_(tensor *out__, tensor self, double median, double sigma) { PROTECT( auto outputs__ = self->cauchy_(median, sigma); out__[0] = new torch::Tensor(outputs__); @@ -1229,7 +1229,7 @@ int atg_cauchy_(tensor *out__, tensor self, double median, double sigma) { return 1; } -int atg_cdist(tensor *out__, tensor x1, tensor x2, double p, int64_t compute_mode) { +C_API int atg_cdist(tensor 
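// Wrappers that take a list of tensors (atg_cat, atg_cartesian_prod,
// atg_chain_matmul, ...) receive it as a C array plus an explicit length and
// rebuild a std::vector<torch::Tensor> via of_carray_tensor. A hypothetical
// call concatenating two tensors along dim 0 (a sketch, following the
// atg_cat signature shown above):
//
//   tensor parts[2] = {a, b};
//   atg_cat(out, parts, 2 /* tensors_len */, 0 /* dim */);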
*out__, tensor x1, tensor x2, double p, int64_t compute_mode) { PROTECT( auto outputs__ = torch::cdist(*x1, *x2, p, compute_mode); out__[0] = new torch::Tensor(outputs__); @@ -1238,7 +1238,7 @@ int atg_cdist(tensor *out__, tensor x1, tensor x2, double p, int64_t compute_mod return 1; } -int atg_ceil(tensor *out__, tensor self) { +C_API int atg_ceil(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::ceil(*self); out__[0] = new torch::Tensor(outputs__); @@ -1247,7 +1247,7 @@ int atg_ceil(tensor *out__, tensor self) { return 1; } -int atg_ceil_(tensor *out__, tensor self) { +C_API int atg_ceil_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::ceil_(*self); out__[0] = new torch::Tensor(outputs__); @@ -1256,7 +1256,7 @@ int atg_ceil_(tensor *out__, tensor self) { return 1; } -int atg_ceil_out(tensor *out__, tensor out, tensor self) { +C_API int atg_ceil_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::ceil_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -1265,7 +1265,7 @@ int atg_ceil_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_celu(tensor *out__, tensor self) { +C_API int atg_celu(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::celu(*self); out__[0] = new torch::Tensor(outputs__); @@ -1274,7 +1274,7 @@ int atg_celu(tensor *out__, tensor self) { return 1; } -int atg_celu_(tensor *out__, tensor self) { +C_API int atg_celu_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::celu_(*self); out__[0] = new torch::Tensor(outputs__); @@ -1283,7 +1283,7 @@ int atg_celu_(tensor *out__, tensor self) { return 1; } -int atg_chain_matmul(tensor *out__, tensor *matrices_data, int matrices_len) { +C_API int atg_chain_matmul(tensor *out__, tensor *matrices_data, int matrices_len) { PROTECT( auto outputs__ = torch::chain_matmul(of_carray_tensor(matrices_data, matrices_len)); out__[0] = new torch::Tensor(outputs__); @@ -1292,7 +1292,7 @@ int atg_chain_matmul(tensor *out__, tensor *matrices_data, int matrices_len) { return 1; } -int atg_cholesky(tensor *out__, tensor self, int upper) { +C_API int atg_cholesky(tensor *out__, tensor self, int upper) { PROTECT( auto outputs__ = torch::cholesky(*self, (bool)upper); out__[0] = new torch::Tensor(outputs__); @@ -1301,7 +1301,7 @@ int atg_cholesky(tensor *out__, tensor self, int upper) { return 1; } -int atg_cholesky_inverse(tensor *out__, tensor self, int upper) { +C_API int atg_cholesky_inverse(tensor *out__, tensor self, int upper) { PROTECT( auto outputs__ = torch::cholesky_inverse(*self, (bool)upper); out__[0] = new torch::Tensor(outputs__); @@ -1310,7 +1310,7 @@ int atg_cholesky_inverse(tensor *out__, tensor self, int upper) { return 1; } -int atg_cholesky_inverse_out(tensor *out__, tensor out, tensor self, int upper) { +C_API int atg_cholesky_inverse_out(tensor *out__, tensor out, tensor self, int upper) { PROTECT( auto outputs__ = torch::cholesky_inverse_out(*out, *self, (bool)upper); out__[0] = new torch::Tensor(outputs__); @@ -1319,7 +1319,7 @@ int atg_cholesky_inverse_out(tensor *out__, tensor out, tensor self, int upper) return 1; } -int atg_cholesky_out(tensor *out__, tensor out, tensor self, int upper) { +C_API int atg_cholesky_out(tensor *out__, tensor out, tensor self, int upper) { PROTECT( auto outputs__ = torch::cholesky_out(*out, *self, (bool)upper); out__[0] = new torch::Tensor(outputs__); @@ -1328,7 +1328,7 @@ int atg_cholesky_out(tensor *out__, tensor out, tensor self, int upper) { return 1; } -int atg_cholesky_solve(tensor *out__, 
tensor self, tensor input2, int upper) { +C_API int atg_cholesky_solve(tensor *out__, tensor self, tensor input2, int upper) { PROTECT( auto outputs__ = torch::cholesky_solve(*self, *input2, (bool)upper); out__[0] = new torch::Tensor(outputs__); @@ -1337,7 +1337,7 @@ int atg_cholesky_solve(tensor *out__, tensor self, tensor input2, int upper) { return 1; } -int atg_cholesky_solve_out(tensor *out__, tensor out, tensor self, tensor input2, int upper) { +C_API int atg_cholesky_solve_out(tensor *out__, tensor out, tensor self, tensor input2, int upper) { PROTECT( auto outputs__ = torch::cholesky_solve_out(*out, *self, *input2, (bool)upper); out__[0] = new torch::Tensor(outputs__); @@ -1346,7 +1346,7 @@ int atg_cholesky_solve_out(tensor *out__, tensor out, tensor self, tensor input2 return 1; } -int atg_chunk(tensor *out__, tensor self, int64_t chunks, int64_t dim) { +C_API int atg_chunk(tensor *out__, tensor self, int64_t chunks, int64_t dim) { PROTECT( auto outputs__ = torch::chunk(*self, chunks, dim); int sz = outputs__.size(); @@ -1360,7 +1360,7 @@ int atg_chunk(tensor *out__, tensor self, int64_t chunks, int64_t dim) { return 1; } -int atg_clamp(tensor *out__, tensor self, scalar min, scalar max) { +C_API int atg_clamp(tensor *out__, tensor self, scalar min, scalar max) { PROTECT( auto outputs__ = torch::clamp(*self, *min, *max); out__[0] = new torch::Tensor(outputs__); @@ -1369,7 +1369,7 @@ int atg_clamp(tensor *out__, tensor self, scalar min, scalar max) { return 1; } -int atg_clamp_(tensor *out__, tensor self, scalar min, scalar max) { +C_API int atg_clamp_(tensor *out__, tensor self, scalar min, scalar max) { PROTECT( auto outputs__ = torch::clamp_(*self, *min, *max); out__[0] = new torch::Tensor(outputs__); @@ -1378,7 +1378,7 @@ int atg_clamp_(tensor *out__, tensor self, scalar min, scalar max) { return 1; } -int atg_clamp_max(tensor *out__, tensor self, scalar max) { +C_API int atg_clamp_max(tensor *out__, tensor self, scalar max) { PROTECT( auto outputs__ = torch::clamp_max(*self, *max); out__[0] = new torch::Tensor(outputs__); @@ -1387,7 +1387,7 @@ int atg_clamp_max(tensor *out__, tensor self, scalar max) { return 1; } -int atg_clamp_max_(tensor *out__, tensor self, scalar max) { +C_API int atg_clamp_max_(tensor *out__, tensor self, scalar max) { PROTECT( auto outputs__ = torch::clamp_max_(*self, *max); out__[0] = new torch::Tensor(outputs__); @@ -1396,7 +1396,7 @@ int atg_clamp_max_(tensor *out__, tensor self, scalar max) { return 1; } -int atg_clamp_max_out(tensor *out__, tensor out, tensor self, scalar max) { +C_API int atg_clamp_max_out(tensor *out__, tensor out, tensor self, scalar max) { PROTECT( auto outputs__ = torch::clamp_max_out(*out, *self, *max); out__[0] = new torch::Tensor(outputs__); @@ -1405,7 +1405,7 @@ int atg_clamp_max_out(tensor *out__, tensor out, tensor self, scalar max) { return 1; } -int atg_clamp_min(tensor *out__, tensor self, scalar min) { +C_API int atg_clamp_min(tensor *out__, tensor self, scalar min) { PROTECT( auto outputs__ = torch::clamp_min(*self, *min); out__[0] = new torch::Tensor(outputs__); @@ -1414,7 +1414,7 @@ int atg_clamp_min(tensor *out__, tensor self, scalar min) { return 1; } -int atg_clamp_min_(tensor *out__, tensor self, scalar min) { +C_API int atg_clamp_min_(tensor *out__, tensor self, scalar min) { PROTECT( auto outputs__ = torch::clamp_min_(*self, *min); out__[0] = new torch::Tensor(outputs__); @@ -1423,7 +1423,7 @@ int atg_clamp_min_(tensor *out__, tensor self, scalar min) { return 1; } -int atg_clamp_min_out(tensor *out__, 
tensor out, tensor self, scalar min) { +C_API int atg_clamp_min_out(tensor *out__, tensor out, tensor self, scalar min) { PROTECT( auto outputs__ = torch::clamp_min_out(*out, *self, *min); out__[0] = new torch::Tensor(outputs__); @@ -1432,7 +1432,7 @@ int atg_clamp_min_out(tensor *out__, tensor out, tensor self, scalar min) { return 1; } -int atg_clamp_out(tensor *out__, tensor out, tensor self, scalar min, scalar max) { +C_API int atg_clamp_out(tensor *out__, tensor out, tensor self, scalar min, scalar max) { PROTECT( auto outputs__ = torch::clamp_out(*out, *self, *min, *max); out__[0] = new torch::Tensor(outputs__); @@ -1441,7 +1441,7 @@ int atg_clamp_out(tensor *out__, tensor out, tensor self, scalar min, scalar max return 1; } -int atg_clone(tensor *out__, tensor self) { +C_API int atg_clone(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::clone(*self); out__[0] = new torch::Tensor(outputs__); @@ -1450,7 +1450,7 @@ int atg_clone(tensor *out__, tensor self) { return 1; } -int atg_coalesce(tensor *out__, tensor self) { +C_API int atg_coalesce(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->coalesce(); out__[0] = new torch::Tensor(outputs__); @@ -1459,7 +1459,7 @@ int atg_coalesce(tensor *out__, tensor self) { return 1; } -int atg_col2im(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { +C_API int atg_col2im(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { PROTECT( auto outputs__ = torch::col2im(*self, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); out__[0] = new torch::Tensor(outputs__); @@ -1468,7 +1468,7 @@ int atg_col2im(tensor *out__, tensor self, int64_t *output_size_data, int output return 1; } -int atg_col2im_backward(tensor *out__, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { +C_API int atg_col2im_backward(tensor *out__, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { PROTECT( auto outputs__ = torch::col2im_backward(*grad_output, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); out__[0] = new torch::Tensor(outputs__); @@ -1477,7 +1477,7 @@ int atg_col2im_backward(tensor *out__, tensor grad_output, int64_t *kernel_size_ return 1; } -int atg_col2im_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { +C_API int atg_col2im_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, 
int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { PROTECT( auto outputs__ = torch::col2im_backward_out(*grad_input, *grad_output, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); out__[0] = new torch::Tensor(outputs__); @@ -1486,7 +1486,7 @@ int atg_col2im_backward_out(tensor *out__, tensor grad_input, tensor grad_output return 1; } -int atg_col2im_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { +C_API int atg_col2im_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) { PROTECT( auto outputs__ = torch::col2im_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len)); out__[0] = new torch::Tensor(outputs__); @@ -1495,7 +1495,7 @@ int atg_col2im_out(tensor *out__, tensor out, tensor self, int64_t *output_size_ return 1; } -int atg_combinations(tensor *out__, tensor self, int64_t r, int with_replacement) { +C_API int atg_combinations(tensor *out__, tensor self, int64_t r, int with_replacement) { PROTECT( auto outputs__ = torch::combinations(*self, r, (bool)with_replacement); out__[0] = new torch::Tensor(outputs__); @@ -1504,7 +1504,7 @@ int atg_combinations(tensor *out__, tensor self, int64_t r, int with_replacement return 1; } -int atg_conj(tensor *out__, tensor self) { +C_API int atg_conj(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::conj(*self); out__[0] = new torch::Tensor(outputs__); @@ -1513,7 +1513,7 @@ int atg_conj(tensor *out__, tensor self) { return 1; } -int atg_conj_out(tensor *out__, tensor out, tensor self) { +C_API int atg_conj_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::conj_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -1522,7 +1522,7 @@ int atg_conj_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_constant_pad_nd(tensor *out__, tensor self, int64_t *pad_data, int pad_len) { +C_API int atg_constant_pad_nd(tensor *out__, tensor self, int64_t *pad_data, int pad_len) { PROTECT( auto outputs__ = torch::constant_pad_nd(*self, torch::IntArrayRef(pad_data, pad_len)); out__[0] = new torch::Tensor(outputs__); @@ -1531,7 +1531,7 @@ int atg_constant_pad_nd(tensor *out__, tensor self, int64_t *pad_data, int pad_l return 1; } -int atg_contiguous(tensor *out__, tensor self) { +C_API int atg_contiguous(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->contiguous(); out__[0] = new torch::Tensor(outputs__); @@ -1540,7 +1540,7 @@ int atg_contiguous(tensor *out__, tensor self) { return 1; } -int atg_conv1d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { +C_API int atg_conv1d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t 
*stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( auto outputs__ = torch::conv1d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); out__[0] = new torch::Tensor(outputs__); @@ -1549,7 +1549,7 @@ int atg_conv1d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t return 1; } -int atg_conv2d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { +C_API int atg_conv2d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( auto outputs__ = torch::conv2d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); out__[0] = new torch::Tensor(outputs__); @@ -1558,7 +1558,7 @@ int atg_conv2d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t return 1; } -int atg_conv3d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { +C_API int atg_conv3d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( auto outputs__ = torch::conv3d(*input, *weight, (bias ? 
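// The (bias ? *bias : torch::Tensor()) pattern used here and in the other
// wrappers taking optional tensors (weight, running_mean, running_var, ...)
// treats the C-side `tensor` as a nullable pointer: a null pointer is
// unwrapped into a default-constructed, undefined torch::Tensor, which ATen
// interprets as "argument omitted". A caller therefore skips an optional
// argument by passing NULL, e.g. a bias-free convolution (a sketch,
// following the atg_conv2d signature shown above):
//
//   atg_conv2d(out, input, weight, NULL /* bias */,
//              stride, 2, padding, 2, dilation, 2, 1 /* groups */);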
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), groups); out__[0] = new torch::Tensor(outputs__); @@ -1567,7 +1567,7 @@ int atg_conv3d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t return 1; } -int atg_conv_tbc(tensor *out__, tensor self, tensor weight, tensor bias, int64_t pad) { +C_API int atg_conv_tbc(tensor *out__, tensor self, tensor weight, tensor bias, int64_t pad) { PROTECT( auto outputs__ = torch::conv_tbc(*self, *weight, *bias, pad); out__[0] = new torch::Tensor(outputs__); @@ -1576,7 +1576,7 @@ int atg_conv_tbc(tensor *out__, tensor self, tensor weight, tensor bias, int64_t return 1; } -int atg_conv_tbc_backward(tensor *out__, tensor self, tensor input, tensor weight, tensor bias, int64_t pad) { +C_API int atg_conv_tbc_backward(tensor *out__, tensor self, tensor input, tensor weight, tensor bias, int64_t pad) { PROTECT( auto outputs__ = torch::conv_tbc_backward(*self, *input, *weight, *bias, pad); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -1587,7 +1587,7 @@ int atg_conv_tbc_backward(tensor *out__, tensor self, tensor input, tensor weigh return 1; } -int atg_conv_transpose1d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len) { +C_API int atg_conv_transpose1d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len) { PROTECT( auto outputs__ = torch::conv_transpose1d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), groups, torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); @@ -1596,7 +1596,7 @@ int atg_conv_transpose1d(tensor *out__, tensor input, tensor weight, tensor bias return 1; } -int atg_conv_transpose2d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len) { +C_API int atg_conv_transpose2d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len) { PROTECT( auto outputs__ = torch::conv_transpose2d(*input, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), groups, torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); @@ -1605,7 +1605,7 @@ int atg_conv_transpose2d(tensor *out__, tensor input, tensor weight, tensor bias return 1; } -int atg_conv_transpose3d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len) { +C_API int atg_conv_transpose3d(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len) { PROTECT( auto outputs__ = torch::conv_transpose3d(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), groups, torch::IntArrayRef(dilation_data, dilation_len)); out__[0] = new torch::Tensor(outputs__); @@ -1614,7 +1614,7 @@ int atg_conv_transpose3d(tensor *out__, tensor input, tensor weight, tensor bias return 1; } -int atg_convolution(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups) { +C_API int atg_convolution(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups) { PROTECT( auto outputs__ = torch::convolution(*input, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len), groups); out__[0] = new torch::Tensor(outputs__); @@ -1623,7 +1623,7 @@ int atg_convolution(tensor *out__, tensor input, tensor weight, tensor bias, int return 1; } -int atg_convolution_overrideable(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups) { +C_API int atg_convolution_overrideable(tensor *out__, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups) { PROTECT( auto outputs__ = torch::convolution_overrideable(*input, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)transposed, torch::IntArrayRef(output_padding_data, output_padding_len), groups); out__[0] = new torch::Tensor(outputs__); @@ -1632,7 +1632,7 @@ int atg_convolution_overrideable(tensor *out__, tensor input, tensor weight, ten return 1; } -int atg_copy_sparse_to_sparse_(tensor *out__, tensor self, tensor src, int non_blocking) { +C_API int atg_copy_sparse_to_sparse_(tensor *out__, tensor self, tensor src, int non_blocking) { PROTECT( auto outputs__ = torch::copy_sparse_to_sparse_(*self, *src, (bool)non_blocking); out__[0] = new torch::Tensor(outputs__); @@ -1641,7 +1641,7 @@ int atg_copy_sparse_to_sparse_(tensor *out__, tensor self, tensor src, int non_b return 1; } -int atg_cos(tensor *out__, tensor self) { +C_API int atg_cos(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::cos(*self); out__[0] = new torch::Tensor(outputs__); @@ -1650,7 +1650,7 @@ int atg_cos(tensor *out__, tensor self) { return 1; } -int atg_cos_(tensor *out__, tensor self) { +C_API int atg_cos_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::cos_(*self); out__[0] = new torch::Tensor(outputs__); @@ -1659,7 +1659,7 @@ int atg_cos_(tensor *out__, tensor self) { return 1; } -int atg_cos_out(tensor *out__, tensor out, tensor self) { +C_API int atg_cos_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::cos_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -1668,7 +1668,7 @@ int atg_cos_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_cosh(tensor *out__, tensor self) { +C_API int atg_cosh(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::cosh(*self); out__[0] = new torch::Tensor(outputs__); @@ -1677,7 +1677,7 @@ int atg_cosh(tensor *out__, tensor self) { return 1; } -int atg_cosh_(tensor *out__, tensor self) { +C_API int atg_cosh_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::cosh_(*self); out__[0] = new torch::Tensor(outputs__); @@ -1686,7 +1686,7 @@ int atg_cosh_(tensor *out__, tensor self) { return 1; } -int atg_cosh_out(tensor *out__, tensor out, tensor self) { +C_API int atg_cosh_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::cosh_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -1695,7 +1695,7 @@ int atg_cosh_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_cosine_embedding_loss(tensor *out__, tensor input1, tensor input2, tensor target, double margin, int64_t reduction) { +C_API int atg_cosine_embedding_loss(tensor *out__, tensor input1, tensor input2, tensor target, double margin, int64_t reduction) { PROTECT( auto outputs__ = torch::cosine_embedding_loss(*input1, *input2, *target, margin, reduction); out__[0] = new torch::Tensor(outputs__); @@ -1704,7 +1704,7 @@ int atg_cosine_embedding_loss(tensor *out__, tensor input1, tensor input2, tenso return 1; } -int atg_cosine_similarity(tensor *out__, tensor x1, tensor x2, int64_t dim, double eps) { +C_API int atg_cosine_similarity(tensor *out__, tensor x1, tensor x2, int64_t dim, double eps) { PROTECT( auto outputs__ = torch::cosine_similarity(*x1, *x2, dim, eps); out__[0] = new torch::Tensor(outputs__); @@ -1713,7 +1713,7 @@ int atg_cosine_similarity(tensor *out__, tensor x1, tensor x2, int64_t dim, doub return 1; } -int atg_cross(tensor *out__, tensor self, tensor other, int64_t dim) { +C_API int atg_cross(tensor *out__, 
tensor self, tensor other, int64_t dim) { PROTECT( auto outputs__ = torch::cross(*self, *other, dim); out__[0] = new torch::Tensor(outputs__); @@ -1722,7 +1722,7 @@ int atg_cross(tensor *out__, tensor self, tensor other, int64_t dim) { return 1; } -int atg_cross_out(tensor *out__, tensor out, tensor self, tensor other, int64_t dim) { +C_API int atg_cross_out(tensor *out__, tensor out, tensor self, tensor other, int64_t dim) { PROTECT( auto outputs__ = torch::cross_out(*out, *self, *other, dim); out__[0] = new torch::Tensor(outputs__); @@ -1731,7 +1731,7 @@ int atg_cross_out(tensor *out__, tensor out, tensor self, tensor other, int64_t return 1; } -int atg_ctc_loss(tensor *out__, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank, int64_t reduction, int zero_infinity) { +C_API int atg_ctc_loss(tensor *out__, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank, int64_t reduction, int zero_infinity) { PROTECT( auto outputs__ = torch::ctc_loss(*log_probs, *targets, torch::IntArrayRef(input_lengths_data, input_lengths_len), torch::IntArrayRef(target_lengths_data, target_lengths_len), blank, reduction, (bool)zero_infinity); out__[0] = new torch::Tensor(outputs__); @@ -1740,7 +1740,7 @@ int atg_ctc_loss(tensor *out__, tensor log_probs, tensor targets, int64_t *input return 1; } -int atg_ctc_loss1(tensor *out__, tensor log_probs, tensor targets, tensor input_lengths, tensor target_lengths, int64_t blank, int64_t reduction, int zero_infinity) { +C_API int atg_ctc_loss1(tensor *out__, tensor log_probs, tensor targets, tensor input_lengths, tensor target_lengths, int64_t blank, int64_t reduction, int zero_infinity) { PROTECT( auto outputs__ = torch::ctc_loss(*log_probs, *targets, *input_lengths, *target_lengths, blank, reduction, (bool)zero_infinity); out__[0] = new torch::Tensor(outputs__); @@ -1749,7 +1749,7 @@ int atg_ctc_loss1(tensor *out__, tensor log_probs, tensor targets, tensor input_ return 1; } -int atg_cudnn_affine_grid_generator(tensor *out__, tensor theta, int64_t n, int64_t C, int64_t H, int64_t W) { +C_API int atg_cudnn_affine_grid_generator(tensor *out__, tensor theta, int64_t n, int64_t C, int64_t H, int64_t W) { PROTECT( auto outputs__ = torch::cudnn_affine_grid_generator(*theta, n, C, H, W); out__[0] = new torch::Tensor(outputs__); @@ -1758,7 +1758,7 @@ int atg_cudnn_affine_grid_generator(tensor *out__, tensor theta, int64_t n, int6 return 1; } -int atg_cudnn_affine_grid_generator_backward(tensor *out__, tensor grad, int64_t n, int64_t C, int64_t H, int64_t W) { +C_API int atg_cudnn_affine_grid_generator_backward(tensor *out__, tensor grad, int64_t n, int64_t C, int64_t H, int64_t W) { PROTECT( auto outputs__ = torch::cudnn_affine_grid_generator_backward(*grad, n, C, H, W); out__[0] = new torch::Tensor(outputs__); @@ -1767,7 +1767,7 @@ int atg_cudnn_affine_grid_generator_backward(tensor *out__, tensor grad, int64_t return 1; } -int atg_cudnn_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon) { +C_API int atg_cudnn_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon) { PROTECT( auto outputs__ = torch::cudnn_batch_norm(*input, *weight, (bias ? 
*bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)training, exponential_average_factor, epsilon); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -1779,7 +1779,7 @@ int atg_cudnn_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias return 1; } -int atg_cudnn_batch_norm_backward(tensor *out__, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon, tensor reserveSpace) { +C_API int atg_cudnn_batch_norm_backward(tensor *out__, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon, tensor reserveSpace) { PROTECT( auto outputs__ = torch::cudnn_batch_norm_backward(*input, *grad_output, *weight, (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (save_mean ? *save_mean : torch::Tensor()), (save_var ? *save_var : torch::Tensor()), epsilon, *reserveSpace); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -1790,7 +1790,7 @@ int atg_cudnn_batch_norm_backward(tensor *out__, tensor input, tensor grad_outpu return 1; } -int atg_cudnn_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +C_API int atg_cudnn_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::cudnn_convolution(*self, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); @@ -1799,7 +1799,7 @@ int atg_cudnn_convolution(tensor *out__, tensor self, tensor weight, tensor bias return 1; } -int atg_cudnn_convolution_backward_bias(tensor *out__, tensor grad_output) { +C_API int atg_cudnn_convolution_backward_bias(tensor *out__, tensor grad_output) { PROTECT( auto outputs__ = torch::cudnn_convolution_backward_bias(*grad_output); out__[0] = new torch::Tensor(outputs__); @@ -1808,7 +1808,7 @@ int atg_cudnn_convolution_backward_bias(tensor *out__, tensor grad_output) { return 1; } -int atg_cudnn_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +C_API int atg_cudnn_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::cudnn_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); @@ -1817,7 +1817,7 @@ int atg_cudnn_convolution_backward_input(tensor *out__, int64_t *self_size_data, return 1; } -int atg_cudnn_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +C_API int atg_cudnn_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::cudnn_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); @@ -1826,7 +1826,7 @@ int atg_cudnn_convolution_backward_weight(tensor *out__, int64_t *weight_size_da return 1; } -int atg_cudnn_convolution_transpose(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +C_API int atg_cudnn_convolution_transpose(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t 
*dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::cudnn_convolution_transpose(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); @@ -1835,7 +1835,7 @@ int atg_cudnn_convolution_transpose(tensor *out__, tensor self, tensor weight, t return 1; } -int atg_cudnn_convolution_transpose_backward_bias(tensor *out__, tensor grad_output) { +C_API int atg_cudnn_convolution_transpose_backward_bias(tensor *out__, tensor grad_output) { PROTECT( auto outputs__ = torch::cudnn_convolution_transpose_backward_bias(*grad_output); out__[0] = new torch::Tensor(outputs__); @@ -1844,7 +1844,7 @@ int atg_cudnn_convolution_transpose_backward_bias(tensor *out__, tensor grad_out return 1; } -int atg_cudnn_convolution_transpose_backward_input(tensor *out__, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +C_API int atg_cudnn_convolution_transpose_backward_input(tensor *out__, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::cudnn_convolution_transpose_backward_input(*grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); @@ -1853,7 +1853,7 @@ int atg_cudnn_convolution_transpose_backward_input(tensor *out__, tensor grad_ou return 1; } -int atg_cudnn_convolution_transpose_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +C_API int atg_cudnn_convolution_transpose_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::cudnn_convolution_transpose_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); @@ -1862,7 +1862,7 @@ int atg_cudnn_convolution_transpose_backward_weight(tensor *out__, int64_t *weig return 1; } -int atg_cudnn_grid_sampler(tensor *out__, tensor self, tensor grid) { +C_API int atg_cudnn_grid_sampler(tensor *out__, tensor self, tensor grid) { PROTECT( auto outputs__ = torch::cudnn_grid_sampler(*self, *grid); out__[0] = new torch::Tensor(outputs__); @@ -1871,7 +1871,7 @@ int atg_cudnn_grid_sampler(tensor *out__, tensor self, tensor grid) { return 1; } -int 
atg_cudnn_grid_sampler_backward(tensor *out__, tensor self, tensor grid, tensor grad_output) { +C_API int atg_cudnn_grid_sampler_backward(tensor *out__, tensor self, tensor grid, tensor grad_output) { PROTECT( auto outputs__ = torch::cudnn_grid_sampler_backward(*self, *grid, *grad_output); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -1881,7 +1881,7 @@ int atg_cudnn_grid_sampler_backward(tensor *out__, tensor self, tensor grid, ten return 1; } -int atg_cumprod(tensor *out__, tensor self, int64_t dim, int dtype) { +C_API int atg_cumprod(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( auto outputs__ = torch::cumprod(*self, dim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); @@ -1890,7 +1890,7 @@ int atg_cumprod(tensor *out__, tensor self, int64_t dim, int dtype) { return 1; } -int atg_cumprod_out(tensor *out__, tensor out, tensor self, int64_t dim, int dtype) { +C_API int atg_cumprod_out(tensor *out__, tensor out, tensor self, int64_t dim, int dtype) { PROTECT( auto outputs__ = torch::cumprod_out(*out, *self, dim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); @@ -1899,7 +1899,7 @@ int atg_cumprod_out(tensor *out__, tensor out, tensor self, int64_t dim, int dty return 1; } -int atg_cumsum(tensor *out__, tensor self, int64_t dim, int dtype) { +C_API int atg_cumsum(tensor *out__, tensor self, int64_t dim, int dtype) { PROTECT( auto outputs__ = torch::cumsum(*self, dim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); @@ -1908,7 +1908,7 @@ int atg_cumsum(tensor *out__, tensor self, int64_t dim, int dtype) { return 1; } -int atg_cumsum_out(tensor *out__, tensor out, tensor self, int64_t dim, int dtype) { +C_API int atg_cumsum_out(tensor *out__, tensor out, tensor self, int64_t dim, int dtype) { PROTECT( auto outputs__ = torch::cumsum_out(*out, *self, dim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); @@ -1917,7 +1917,7 @@ int atg_cumsum_out(tensor *out__, tensor out, tensor self, int64_t dim, int dtyp return 1; } -int atg_data(tensor *out__, tensor self) { +C_API int atg_data(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->data(); out__[0] = new torch::Tensor(outputs__); @@ -1926,7 +1926,7 @@ int atg_data(tensor *out__, tensor self) { return 1; } -int atg_dequantize(tensor *out__, tensor self) { +C_API int atg_dequantize(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::dequantize(*self); out__[0] = new torch::Tensor(outputs__); @@ -1935,7 +1935,7 @@ int atg_dequantize(tensor *out__, tensor self) { return 1; } -int atg_det(tensor *out__, tensor self) { +C_API int atg_det(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::det(*self); out__[0] = new torch::Tensor(outputs__); @@ -1944,7 +1944,7 @@ int atg_det(tensor *out__, tensor self) { return 1; } -int atg_detach(tensor *out__, tensor self) { +C_API int atg_detach(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::detach(*self); out__[0] = new torch::Tensor(outputs__); @@ -1953,7 +1953,7 @@ int atg_detach(tensor *out__, tensor self) { return 1; } -int atg_detach_(tensor *out__, tensor self) { +C_API int atg_detach_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::detach_(*self); out__[0] = new torch::Tensor(outputs__); @@ -1962,7 +1962,7 @@ int atg_detach_(tensor *out__, tensor self) { return 1; } -int atg_diag(tensor *out__, tensor self, int64_t diagonal) { +C_API int atg_diag(tensor *out__, tensor self, int64_t diagonal) { PROTECT( auto outputs__ = torch::diag(*self, 
diagonal); out__[0] = new torch::Tensor(outputs__); @@ -1971,7 +1971,7 @@ int atg_diag(tensor *out__, tensor self, int64_t diagonal) { return 1; } -int atg_diag_embed(tensor *out__, tensor self, int64_t offset, int64_t dim1, int64_t dim2) { +C_API int atg_diag_embed(tensor *out__, tensor self, int64_t offset, int64_t dim1, int64_t dim2) { PROTECT( auto outputs__ = torch::diag_embed(*self, offset, dim1, dim2); out__[0] = new torch::Tensor(outputs__); @@ -1980,7 +1980,7 @@ int atg_diag_embed(tensor *out__, tensor self, int64_t offset, int64_t dim1, int return 1; } -int atg_diag_out(tensor *out__, tensor out, tensor self, int64_t diagonal) { +C_API int atg_diag_out(tensor *out__, tensor out, tensor self, int64_t diagonal) { PROTECT( auto outputs__ = torch::diag_out(*out, *self, diagonal); out__[0] = new torch::Tensor(outputs__); @@ -1989,7 +1989,7 @@ int atg_diag_out(tensor *out__, tensor out, tensor self, int64_t diagonal) { return 1; } -int atg_diagflat(tensor *out__, tensor self, int64_t offset) { +C_API int atg_diagflat(tensor *out__, tensor self, int64_t offset) { PROTECT( auto outputs__ = torch::diagflat(*self, offset); out__[0] = new torch::Tensor(outputs__); @@ -1998,7 +1998,7 @@ int atg_diagflat(tensor *out__, tensor self, int64_t offset) { return 1; } -int atg_diagonal(tensor *out__, tensor self, int64_t offset, int64_t dim1, int64_t dim2) { +C_API int atg_diagonal(tensor *out__, tensor self, int64_t offset, int64_t dim1, int64_t dim2) { PROTECT( auto outputs__ = torch::diagonal(*self, offset, dim1, dim2); out__[0] = new torch::Tensor(outputs__); @@ -2007,7 +2007,7 @@ int atg_diagonal(tensor *out__, tensor self, int64_t offset, int64_t dim1, int64 return 1; } -int atg_digamma(tensor *out__, tensor self) { +C_API int atg_digamma(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::digamma(*self); out__[0] = new torch::Tensor(outputs__); @@ -2016,7 +2016,7 @@ int atg_digamma(tensor *out__, tensor self) { return 1; } -int atg_digamma_(tensor *out__, tensor self) { +C_API int atg_digamma_(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->digamma_(); out__[0] = new torch::Tensor(outputs__); @@ -2025,7 +2025,7 @@ int atg_digamma_(tensor *out__, tensor self) { return 1; } -int atg_digamma_out(tensor *out__, tensor out, tensor self) { +C_API int atg_digamma_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::digamma_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -2034,7 +2034,7 @@ int atg_digamma_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_dist(tensor *out__, tensor self, tensor other) { +C_API int atg_dist(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::dist(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -2043,7 +2043,7 @@ int atg_dist(tensor *out__, tensor self, tensor other) { return 1; } -int atg_div(tensor *out__, tensor self, tensor other) { +C_API int atg_div(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::div(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -2052,7 +2052,7 @@ int atg_div(tensor *out__, tensor self, tensor other) { return 1; } -int atg_div1(tensor *out__, tensor self, scalar other) { +C_API int atg_div1(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::div(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -2061,7 +2061,7 @@ int atg_div1(tensor *out__, tensor self, scalar other) { return 1; } -int atg_div_(tensor *out__, tensor self, tensor other) { +C_API int 
atg_div_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->div_(*other); out__[0] = new torch::Tensor(outputs__); @@ -2070,7 +2070,7 @@ int atg_div_(tensor *out__, tensor self, tensor other) { return 1; } -int atg_div_1(tensor *out__, tensor self, scalar other) { +C_API int atg_div_1(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->div_(*other); out__[0] = new torch::Tensor(outputs__); @@ -2079,7 +2079,7 @@ int atg_div_1(tensor *out__, tensor self, scalar other) { return 1; } -int atg_div_out(tensor *out__, tensor out, tensor self, tensor other) { +C_API int atg_div_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::div_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); @@ -2088,7 +2088,7 @@ int atg_div_out(tensor *out__, tensor out, tensor self, tensor other) { return 1; } -int atg_dot(tensor *out__, tensor self, tensor tensor) { +C_API int atg_dot(tensor *out__, tensor self, tensor tensor) { PROTECT( auto outputs__ = torch::dot(*self, *tensor); out__[0] = new torch::Tensor(outputs__); @@ -2097,7 +2097,7 @@ int atg_dot(tensor *out__, tensor self, tensor tensor) { return 1; } -int atg_dot_out(tensor *out__, tensor out, tensor self, tensor tensor) { +C_API int atg_dot_out(tensor *out__, tensor out, tensor self, tensor tensor) { PROTECT( auto outputs__ = torch::dot_out(*out, *self, *tensor); out__[0] = new torch::Tensor(outputs__); @@ -2106,7 +2106,7 @@ int atg_dot_out(tensor *out__, tensor out, tensor self, tensor tensor) { return 1; } -int atg_dropout(tensor *out__, tensor input, double p, int train) { +C_API int atg_dropout(tensor *out__, tensor input, double p, int train) { PROTECT( auto outputs__ = torch::dropout(*input, p, (bool)train); out__[0] = new torch::Tensor(outputs__); @@ -2115,7 +2115,7 @@ int atg_dropout(tensor *out__, tensor input, double p, int train) { return 1; } -int atg_dropout_(tensor *out__, tensor self, double p, int train) { +C_API int atg_dropout_(tensor *out__, tensor self, double p, int train) { PROTECT( auto outputs__ = torch::dropout_(*self, p, (bool)train); out__[0] = new torch::Tensor(outputs__); @@ -2124,7 +2124,7 @@ int atg_dropout_(tensor *out__, tensor self, double p, int train) { return 1; } -int atg_eig(tensor *out__, tensor self, int eigenvectors) { +C_API int atg_eig(tensor *out__, tensor self, int eigenvectors) { PROTECT( auto outputs__ = torch::eig(*self, (bool)eigenvectors); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -2134,7 +2134,7 @@ int atg_eig(tensor *out__, tensor self, int eigenvectors) { return 1; } -int atg_eig_out(tensor *out__, tensor e, tensor v, tensor self, int eigenvectors) { +C_API int atg_eig_out(tensor *out__, tensor e, tensor v, tensor self, int eigenvectors) { PROTECT( auto outputs__ = torch::eig_out(*e, *v, *self, (bool)eigenvectors); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -2144,7 +2144,7 @@ int atg_eig_out(tensor *out__, tensor e, tensor v, tensor self, int eigenvectors return 1; } -int atg_elu(tensor *out__, tensor self) { +C_API int atg_elu(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::elu(*self); out__[0] = new torch::Tensor(outputs__); @@ -2153,7 +2153,7 @@ int atg_elu(tensor *out__, tensor self) { return 1; } -int atg_elu_(tensor *out__, tensor self) { +C_API int atg_elu_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::elu_(*self); out__[0] = new torch::Tensor(outputs__); @@ -2162,7 +2162,7 @@ int atg_elu_(tensor *out__, tensor self) { return 1; } 
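The C_API decorator applied uniformly in these hunks is the DLL export marker this patch introduces; its definition lives in build/torch_api.h, outside this excerpt. A minimal sketch of the conventional shape of such a macro, assuming a Windows DLL build — the actual definition in the header may differ (for instance a dllimport branch for consumers, or __attribute__((visibility("default"))) on GCC/Clang):

    #ifdef _WIN32
    #define C_API __declspec(dllexport)   /* export from the DLL being built */
    #else
    #define C_API                         /* no-op on non-Windows builds */
    #endif

On the caller side, every atg_* wrapper follows the contract visible in the PROTECT bodies: it returns 0 with out__ populated on success, and 1 once an exception has been caught. A hypothetical caller-side check, using only functions declared in this patch (the 512-byte buffer is an arbitrary choice; get_last_error copies the stored message in full):

    tensor out = NULL;                  /* receives the new handle on success */
    if (atg_erf(&out, self) != 0) {     /* self: an existing tensor handle */
        char err[512];
        get_last_error(err);
        /* report err before issuing further calls */
    }
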
-int atg_elu_backward(tensor *out__, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, tensor output) { +C_API int atg_elu_backward(tensor *out__, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, tensor output) { PROTECT( auto outputs__ = torch::elu_backward(*grad_output, *alpha, *scale, *input_scale, *output); out__[0] = new torch::Tensor(outputs__); @@ -2171,7 +2171,7 @@ int atg_elu_backward(tensor *out__, tensor grad_output, scalar alpha, scalar sca return 1; } -int atg_elu_backward_out(tensor *out__, tensor grad_input, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, tensor output) { +C_API int atg_elu_backward_out(tensor *out__, tensor grad_input, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, tensor output) { PROTECT( auto outputs__ = torch::elu_backward_out(*grad_input, *grad_output, *alpha, *scale, *input_scale, *output); out__[0] = new torch::Tensor(outputs__); @@ -2180,7 +2180,7 @@ int atg_elu_backward_out(tensor *out__, tensor grad_input, tensor grad_output, s return 1; } -int atg_elu_out(tensor *out__, tensor out, tensor self) { +C_API int atg_elu_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::elu_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -2189,7 +2189,7 @@ int atg_elu_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_embedding(tensor *out__, tensor weight, tensor indices, int64_t padding_idx, int scale_grad_by_freq, int sparse) { +C_API int atg_embedding(tensor *out__, tensor weight, tensor indices, int64_t padding_idx, int scale_grad_by_freq, int sparse) { PROTECT( auto outputs__ = torch::embedding(*weight, *indices, padding_idx, (bool)scale_grad_by_freq, (bool)sparse); out__[0] = new torch::Tensor(outputs__); @@ -2198,7 +2198,7 @@ int atg_embedding(tensor *out__, tensor weight, tensor indices, int64_t padding_ return 1; } -int atg_embedding_backward(tensor *out__, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq, int sparse) { +C_API int atg_embedding_backward(tensor *out__, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq, int sparse) { PROTECT( auto outputs__ = torch::embedding_backward(*grad, *indices, num_weights, padding_idx, (bool)scale_grad_by_freq, (bool)sparse); out__[0] = new torch::Tensor(outputs__); @@ -2207,7 +2207,7 @@ int atg_embedding_backward(tensor *out__, tensor grad, tensor indices, int64_t n return 1; } -int atg_embedding_bag(tensor *out__, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights) { +C_API int atg_embedding_bag(tensor *out__, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights) { PROTECT( auto outputs__ = torch::embedding_bag(*weight, *indices, *offsets, (bool)scale_grad_by_freq, mode, (bool)sparse, (per_sample_weights ? 
*per_sample_weights : torch::Tensor())); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -2219,7 +2219,7 @@ int atg_embedding_bag(tensor *out__, tensor weight, tensor indices, tensor offse return 1; } -int atg_embedding_dense_backward(tensor *out__, tensor grad_output, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq) { +C_API int atg_embedding_dense_backward(tensor *out__, tensor grad_output, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq) { PROTECT( auto outputs__ = torch::embedding_dense_backward(*grad_output, *indices, num_weights, padding_idx, (bool)scale_grad_by_freq); out__[0] = new torch::Tensor(outputs__); @@ -2228,7 +2228,7 @@ int atg_embedding_dense_backward(tensor *out__, tensor grad_output, tensor indic return 1; } -int atg_embedding_renorm_(tensor *out__, tensor self, tensor indices, double max_norm, double norm_type) { +C_API int atg_embedding_renorm_(tensor *out__, tensor self, tensor indices, double max_norm, double norm_type) { PROTECT( auto outputs__ = torch::embedding_renorm_(*self, *indices, max_norm, norm_type); out__[0] = new torch::Tensor(outputs__); @@ -2237,7 +2237,7 @@ int atg_embedding_renorm_(tensor *out__, tensor self, tensor indices, double max return 1; } -int atg_embedding_sparse_backward(tensor *out__, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq) { +C_API int atg_embedding_sparse_backward(tensor *out__, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq) { PROTECT( auto outputs__ = torch::embedding_sparse_backward(*grad, *indices, num_weights, padding_idx, (bool)scale_grad_by_freq); out__[0] = new torch::Tensor(outputs__); @@ -2246,7 +2246,7 @@ int atg_embedding_sparse_backward(tensor *out__, tensor grad, tensor indices, in return 1; } -int atg_empty(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) { +C_API int atg_empty(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::empty(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -2255,7 +2255,7 @@ int atg_empty(tensor *out__, int64_t *size_data, int size_len, int options_kind, return 1; } -int atg_empty_like(tensor *out__, tensor self) { +C_API int atg_empty_like(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::empty_like(*self); out__[0] = new torch::Tensor(outputs__); @@ -2264,7 +2264,7 @@ int atg_empty_like(tensor *out__, tensor self) { return 1; } -int atg_empty_like1(tensor *out__, tensor self, int options_kind, int options_device) { +C_API int atg_empty_like1(tensor *out__, tensor self, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::empty_like(*self, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -2273,7 +2273,7 @@ int atg_empty_like1(tensor *out__, tensor self, int options_kind, int options_de return 1; } -int atg_empty_out(tensor *out__, tensor out, int64_t *size_data, int size_len) { +C_API int atg_empty_out(tensor *out__, tensor out, int64_t *size_data, int size_len) { PROTECT( auto outputs__ = torch::empty_out(*out, torch::IntArrayRef(size_data, size_len)); out__[0] = new torch::Tensor(outputs__); @@ -2282,7 +2282,7 @@ int atg_empty_out(tensor *out__, tensor out, int64_t *size_data, int size_len) { 
return 1; } -int atg_empty_strided(tensor *out__, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int options_kind, int options_device) { +C_API int atg_empty_strided(tensor *out__, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::empty_strided(torch::IntArrayRef(size_data, size_len), torch::IntArrayRef(stride_data, stride_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -2291,7 +2291,7 @@ int atg_empty_strided(tensor *out__, int64_t *size_data, int size_len, int64_t * return 1; } -int atg_eq(tensor *out__, tensor self, scalar other) { +C_API int atg_eq(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::eq(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -2300,7 +2300,7 @@ int atg_eq(tensor *out__, tensor self, scalar other) { return 1; } -int atg_eq1(tensor *out__, tensor self, tensor other) { +C_API int atg_eq1(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::eq(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -2309,7 +2309,7 @@ int atg_eq1(tensor *out__, tensor self, tensor other) { return 1; } -int atg_eq_(tensor *out__, tensor self, scalar other) { +C_API int atg_eq_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->eq_(*other); out__[0] = new torch::Tensor(outputs__); @@ -2318,7 +2318,7 @@ int atg_eq_(tensor *out__, tensor self, scalar other) { return 1; } -int atg_eq_1(tensor *out__, tensor self, tensor other) { +C_API int atg_eq_1(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->eq_(*other); out__[0] = new torch::Tensor(outputs__); @@ -2327,7 +2327,7 @@ int atg_eq_1(tensor *out__, tensor self, tensor other) { return 1; } -int atg_eq_out(tensor *out__, tensor out, tensor self, scalar other) { +C_API int atg_eq_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::eq_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); @@ -2336,7 +2336,7 @@ int atg_eq_out(tensor *out__, tensor out, tensor self, scalar other) { return 1; } -int atg_eq_out1(tensor *out__, tensor out, tensor self, tensor other) { +C_API int atg_eq_out1(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::eq_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); @@ -2345,7 +2345,7 @@ int atg_eq_out1(tensor *out__, tensor out, tensor self, tensor other) { return 1; } -int atg_erf(tensor *out__, tensor self) { +C_API int atg_erf(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::erf(*self); out__[0] = new torch::Tensor(outputs__); @@ -2354,7 +2354,7 @@ int atg_erf(tensor *out__, tensor self) { return 1; } -int atg_erf_(tensor *out__, tensor self) { +C_API int atg_erf_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::erf_(*self); out__[0] = new torch::Tensor(outputs__); @@ -2363,7 +2363,7 @@ int atg_erf_(tensor *out__, tensor self) { return 1; } -int atg_erf_out(tensor *out__, tensor out, tensor self) { +C_API int atg_erf_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::erf_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -2372,7 +2372,7 @@ int atg_erf_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_erfc(tensor *out__, tensor self) { +C_API int atg_erfc(tensor *out__, tensor self) { PROTECT( auto outputs__ = 
torch::erfc(*self); out__[0] = new torch::Tensor(outputs__); @@ -2381,7 +2381,7 @@ int atg_erfc(tensor *out__, tensor self) { return 1; } -int atg_erfc_(tensor *out__, tensor self) { +C_API int atg_erfc_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::erfc_(*self); out__[0] = new torch::Tensor(outputs__); @@ -2390,7 +2390,7 @@ int atg_erfc_(tensor *out__, tensor self) { return 1; } -int atg_erfc_out(tensor *out__, tensor out, tensor self) { +C_API int atg_erfc_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::erfc_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -2399,7 +2399,7 @@ int atg_erfc_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_erfinv(tensor *out__, tensor self) { +C_API int atg_erfinv(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::erfinv(*self); out__[0] = new torch::Tensor(outputs__); @@ -2408,7 +2408,7 @@ int atg_erfinv(tensor *out__, tensor self) { return 1; } -int atg_erfinv_(tensor *out__, tensor self) { +C_API int atg_erfinv_(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->erfinv_(); out__[0] = new torch::Tensor(outputs__); @@ -2417,7 +2417,7 @@ int atg_erfinv_(tensor *out__, tensor self) { return 1; } -int atg_erfinv_out(tensor *out__, tensor out, tensor self) { +C_API int atg_erfinv_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::erfinv_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -2426,7 +2426,7 @@ int atg_erfinv_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_exp(tensor *out__, tensor self) { +C_API int atg_exp(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::exp(*self); out__[0] = new torch::Tensor(outputs__); @@ -2435,7 +2435,7 @@ int atg_exp(tensor *out__, tensor self) { return 1; } -int atg_exp_(tensor *out__, tensor self) { +C_API int atg_exp_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::exp_(*self); out__[0] = new torch::Tensor(outputs__); @@ -2444,7 +2444,7 @@ int atg_exp_(tensor *out__, tensor self) { return 1; } -int atg_exp_out(tensor *out__, tensor out, tensor self) { +C_API int atg_exp_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::exp_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -2453,7 +2453,7 @@ int atg_exp_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_expand(tensor *out__, tensor self, int64_t *size_data, int size_len, int implicit) { +C_API int atg_expand(tensor *out__, tensor self, int64_t *size_data, int size_len, int implicit) { PROTECT( auto outputs__ = self->expand(torch::IntArrayRef(size_data, size_len), (bool)implicit); out__[0] = new torch::Tensor(outputs__); @@ -2462,7 +2462,7 @@ int atg_expand(tensor *out__, tensor self, int64_t *size_data, int size_len, int return 1; } -int atg_expand_as(tensor *out__, tensor self, tensor other) { +C_API int atg_expand_as(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->expand_as(*other); out__[0] = new torch::Tensor(outputs__); @@ -2471,7 +2471,7 @@ int atg_expand_as(tensor *out__, tensor self, tensor other) { return 1; } -int atg_expm1(tensor *out__, tensor self) { +C_API int atg_expm1(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::expm1(*self); out__[0] = new torch::Tensor(outputs__); @@ -2480,7 +2480,7 @@ int atg_expm1(tensor *out__, tensor self) { return 1; } -int atg_expm1_(tensor *out__, tensor self) { +C_API int atg_expm1_(tensor *out__, tensor self) { PROTECT( auto outputs__ = 
torch::expm1_(*self); out__[0] = new torch::Tensor(outputs__); @@ -2489,7 +2489,7 @@ int atg_expm1_(tensor *out__, tensor self) { return 1; } -int atg_expm1_out(tensor *out__, tensor out, tensor self) { +C_API int atg_expm1_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::expm1_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -2498,7 +2498,7 @@ int atg_expm1_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_exponential_(tensor *out__, tensor self, double lambd) { +C_API int atg_exponential_(tensor *out__, tensor self, double lambd) { PROTECT( auto outputs__ = self->exponential_(lambd); out__[0] = new torch::Tensor(outputs__); @@ -2507,7 +2507,7 @@ int atg_exponential_(tensor *out__, tensor self, double lambd) { return 1; } -int atg_eye(tensor *out__, int64_t n, int options_kind, int options_device) { +C_API int atg_eye(tensor *out__, int64_t n, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::eye(n, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -2516,7 +2516,7 @@ int atg_eye(tensor *out__, int64_t n, int options_kind, int options_device) { return 1; } -int atg_eye1(tensor *out__, int64_t n, int64_t m, int options_kind, int options_device) { +C_API int atg_eye1(tensor *out__, int64_t n, int64_t m, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::eye(n, m, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -2525,7 +2525,7 @@ int atg_eye1(tensor *out__, int64_t n, int64_t m, int options_kind, int options_ return 1; } -int atg_eye_out(tensor *out__, tensor out, int64_t n) { +C_API int atg_eye_out(tensor *out__, tensor out, int64_t n) { PROTECT( auto outputs__ = torch::eye_out(*out, n); out__[0] = new torch::Tensor(outputs__); @@ -2534,7 +2534,7 @@ int atg_eye_out(tensor *out__, tensor out, int64_t n) { return 1; } -int atg_eye_out1(tensor *out__, tensor out, int64_t n, int64_t m) { +C_API int atg_eye_out1(tensor *out__, tensor out, int64_t n, int64_t m) { PROTECT( auto outputs__ = torch::eye_out(*out, n, m); out__[0] = new torch::Tensor(outputs__); @@ -2543,7 +2543,7 @@ int atg_eye_out1(tensor *out__, tensor out, int64_t n, int64_t m) { return 1; } -int atg_fake_quantize_per_channel_affine(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { +C_API int atg_fake_quantize_per_channel_affine(tensor *out__, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { PROTECT( auto outputs__ = torch::fake_quantize_per_channel_affine(*self, *scale, *zero_point, axis, quant_min, quant_max); out__[0] = new torch::Tensor(outputs__); @@ -2552,7 +2552,7 @@ int atg_fake_quantize_per_channel_affine(tensor *out__, tensor self, tensor scal return 1; } -int atg_fake_quantize_per_channel_affine_backward(tensor *out__, tensor grad, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { +C_API int atg_fake_quantize_per_channel_affine_backward(tensor *out__, tensor grad, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) { PROTECT( auto outputs__ = torch::fake_quantize_per_channel_affine_backward(*grad, *self, *scale, *zero_point, axis, quant_min, quant_max); out__[0] = new torch::Tensor(outputs__); @@ -2561,7 +2561,7 @@ int 
atg_fake_quantize_per_channel_affine_backward(tensor *out__, tensor grad, te return 1; } -int atg_fake_quantize_per_tensor_affine(tensor *out__, tensor self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { +C_API int atg_fake_quantize_per_tensor_affine(tensor *out__, tensor self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { PROTECT( auto outputs__ = torch::fake_quantize_per_tensor_affine(*self, scale, zero_point, quant_min, quant_max); out__[0] = new torch::Tensor(outputs__); @@ -2570,7 +2570,7 @@ int atg_fake_quantize_per_tensor_affine(tensor *out__, tensor self, double scale return 1; } -int atg_fake_quantize_per_tensor_affine_backward(tensor *out__, tensor grad, tensor self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { +C_API int atg_fake_quantize_per_tensor_affine_backward(tensor *out__, tensor grad, tensor self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) { PROTECT( auto outputs__ = torch::fake_quantize_per_tensor_affine_backward(*grad, *self, scale, zero_point, quant_min, quant_max); out__[0] = new torch::Tensor(outputs__); @@ -2579,7 +2579,7 @@ int atg_fake_quantize_per_tensor_affine_backward(tensor *out__, tensor grad, ten return 1; } -int atg_fbgemm_linear_fp16_weight(tensor *out__, tensor input, tensor packed_weight, tensor bias) { +C_API int atg_fbgemm_linear_fp16_weight(tensor *out__, tensor input, tensor packed_weight, tensor bias) { PROTECT( auto outputs__ = torch::fbgemm_linear_fp16_weight(*input, *packed_weight, *bias); out__[0] = new torch::Tensor(outputs__); @@ -2588,7 +2588,7 @@ int atg_fbgemm_linear_fp16_weight(tensor *out__, tensor input, tensor packed_wei return 1; } -int atg_fbgemm_linear_fp16_weight_fp32_activation(tensor *out__, tensor input, tensor packed_weight, tensor bias) { +C_API int atg_fbgemm_linear_fp16_weight_fp32_activation(tensor *out__, tensor input, tensor packed_weight, tensor bias) { PROTECT( auto outputs__ = torch::fbgemm_linear_fp16_weight_fp32_activation(*input, *packed_weight, *bias); out__[0] = new torch::Tensor(outputs__); @@ -2597,7 +2597,7 @@ int atg_fbgemm_linear_fp16_weight_fp32_activation(tensor *out__, tensor input, t return 1; } -int atg_fbgemm_linear_int8_weight(tensor *out__, tensor input, tensor weight, tensor packed, tensor col_offsets, scalar weight_scale, scalar weight_zero_point, tensor bias) { +C_API int atg_fbgemm_linear_int8_weight(tensor *out__, tensor input, tensor weight, tensor packed, tensor col_offsets, scalar weight_scale, scalar weight_zero_point, tensor bias) { PROTECT( auto outputs__ = torch::fbgemm_linear_int8_weight(*input, *weight, *packed, *col_offsets, *weight_scale, *weight_zero_point, *bias); out__[0] = new torch::Tensor(outputs__); @@ -2606,7 +2606,7 @@ int atg_fbgemm_linear_int8_weight(tensor *out__, tensor input, tensor weight, te return 1; } -int atg_fbgemm_linear_int8_weight_fp32_activation(tensor *out__, tensor input, tensor weight, tensor packed, tensor col_offsets, scalar weight_scale, scalar weight_zero_point, tensor bias) { +C_API int atg_fbgemm_linear_int8_weight_fp32_activation(tensor *out__, tensor input, tensor weight, tensor packed, tensor col_offsets, scalar weight_scale, scalar weight_zero_point, tensor bias) { PROTECT( auto outputs__ = torch::fbgemm_linear_int8_weight_fp32_activation(*input, *weight, *packed, *col_offsets, *weight_scale, *weight_zero_point, *bias); out__[0] = new torch::Tensor(outputs__); @@ -2615,7 +2615,7 @@ int atg_fbgemm_linear_int8_weight_fp32_activation(tensor 
*out__, tensor input, t return 1; } -int atg_fbgemm_pack_gemm_matrix_fp16(tensor *out__, tensor input) { +C_API int atg_fbgemm_pack_gemm_matrix_fp16(tensor *out__, tensor input) { PROTECT( auto outputs__ = torch::fbgemm_pack_gemm_matrix_fp16(*input); out__[0] = new torch::Tensor(outputs__); @@ -2624,7 +2624,7 @@ int atg_fbgemm_pack_gemm_matrix_fp16(tensor *out__, tensor input) { return 1; } -int atg_fbgemm_pack_quantized_matrix(tensor *out__, tensor input) { +C_API int atg_fbgemm_pack_quantized_matrix(tensor *out__, tensor input) { PROTECT( auto outputs__ = torch::fbgemm_pack_quantized_matrix(*input); out__[0] = new torch::Tensor(outputs__); @@ -2633,7 +2633,7 @@ int atg_fbgemm_pack_quantized_matrix(tensor *out__, tensor input) { return 1; } -int atg_fbgemm_pack_quantized_matrix1(tensor *out__, tensor input, int64_t K, int64_t n) { +C_API int atg_fbgemm_pack_quantized_matrix1(tensor *out__, tensor input, int64_t K, int64_t n) { PROTECT( auto outputs__ = torch::fbgemm_pack_quantized_matrix(*input, K, n); out__[0] = new torch::Tensor(outputs__); @@ -2642,7 +2642,7 @@ int atg_fbgemm_pack_quantized_matrix1(tensor *out__, tensor input, int64_t K, in return 1; } -int atg_feature_alpha_dropout(tensor *out__, tensor input, double p, int train) { +C_API int atg_feature_alpha_dropout(tensor *out__, tensor input, double p, int train) { PROTECT( auto outputs__ = torch::feature_alpha_dropout(*input, p, (bool)train); out__[0] = new torch::Tensor(outputs__); @@ -2651,7 +2651,7 @@ int atg_feature_alpha_dropout(tensor *out__, tensor input, double p, int train) return 1; } -int atg_feature_alpha_dropout_(tensor *out__, tensor self, double p, int train) { +C_API int atg_feature_alpha_dropout_(tensor *out__, tensor self, double p, int train) { PROTECT( auto outputs__ = torch::feature_alpha_dropout_(*self, p, (bool)train); out__[0] = new torch::Tensor(outputs__); @@ -2660,7 +2660,7 @@ int atg_feature_alpha_dropout_(tensor *out__, tensor self, double p, int train) return 1; } -int atg_feature_dropout(tensor *out__, tensor input, double p, int train) { +C_API int atg_feature_dropout(tensor *out__, tensor input, double p, int train) { PROTECT( auto outputs__ = torch::feature_dropout(*input, p, (bool)train); out__[0] = new torch::Tensor(outputs__); @@ -2669,7 +2669,7 @@ int atg_feature_dropout(tensor *out__, tensor input, double p, int train) { return 1; } -int atg_feature_dropout_(tensor *out__, tensor self, double p, int train) { +C_API int atg_feature_dropout_(tensor *out__, tensor self, double p, int train) { PROTECT( auto outputs__ = torch::feature_dropout_(*self, p, (bool)train); out__[0] = new torch::Tensor(outputs__); @@ -2678,7 +2678,7 @@ int atg_feature_dropout_(tensor *out__, tensor self, double p, int train) { return 1; } -int atg_fft(tensor *out__, tensor self, int64_t signal_ndim, int normalized) { +C_API int atg_fft(tensor *out__, tensor self, int64_t signal_ndim, int normalized) { PROTECT( auto outputs__ = torch::fft(*self, signal_ndim, (bool)normalized); out__[0] = new torch::Tensor(outputs__); @@ -2687,7 +2687,7 @@ int atg_fft(tensor *out__, tensor self, int64_t signal_ndim, int normalized) { return 1; } -int atg_fill_(tensor *out__, tensor self, scalar value) { +C_API int atg_fill_(tensor *out__, tensor self, scalar value) { PROTECT( auto outputs__ = torch::fill_(*self, *value); out__[0] = new torch::Tensor(outputs__); @@ -2696,7 +2696,7 @@ int atg_fill_(tensor *out__, tensor self, scalar value) { return 1; } -int atg_fill_1(tensor *out__, tensor self, tensor value) { +C_API int 
atg_fill_1(tensor *out__, tensor self, tensor value) { PROTECT( auto outputs__ = torch::fill_(*self, *value); out__[0] = new torch::Tensor(outputs__); @@ -2705,7 +2705,7 @@ int atg_fill_1(tensor *out__, tensor self, tensor value) { return 1; } -int atg_fill_diagonal_(tensor *out__, tensor self, scalar fill_value, int wrap) { +C_API int atg_fill_diagonal_(tensor *out__, tensor self, scalar fill_value, int wrap) { PROTECT( auto outputs__ = self->fill_diagonal_(*fill_value, (bool)wrap); out__[0] = new torch::Tensor(outputs__); @@ -2714,7 +2714,7 @@ int atg_fill_diagonal_(tensor *out__, tensor self, scalar fill_value, int wrap) return 1; } -int atg_flatten(tensor *out__, tensor self, int64_t start_dim, int64_t end_dim) { +C_API int atg_flatten(tensor *out__, tensor self, int64_t start_dim, int64_t end_dim) { PROTECT( auto outputs__ = torch::flatten(*self, start_dim, end_dim); out__[0] = new torch::Tensor(outputs__); @@ -2723,7 +2723,7 @@ int atg_flatten(tensor *out__, tensor self, int64_t start_dim, int64_t end_dim) return 1; } -int atg_flip(tensor *out__, tensor self, int64_t *dims_data, int dims_len) { +C_API int atg_flip(tensor *out__, tensor self, int64_t *dims_data, int dims_len) { PROTECT( auto outputs__ = torch::flip(*self, torch::IntArrayRef(dims_data, dims_len)); out__[0] = new torch::Tensor(outputs__); @@ -2732,7 +2732,7 @@ int atg_flip(tensor *out__, tensor self, int64_t *dims_data, int dims_len) { return 1; } -int atg_floor(tensor *out__, tensor self) { +C_API int atg_floor(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::floor(*self); out__[0] = new torch::Tensor(outputs__); @@ -2741,7 +2741,7 @@ int atg_floor(tensor *out__, tensor self) { return 1; } -int atg_floor_(tensor *out__, tensor self) { +C_API int atg_floor_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::floor_(*self); out__[0] = new torch::Tensor(outputs__); @@ -2750,7 +2750,7 @@ int atg_floor_(tensor *out__, tensor self) { return 1; } -int atg_floor_out(tensor *out__, tensor out, tensor self) { +C_API int atg_floor_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::floor_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -2759,7 +2759,7 @@ int atg_floor_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_fmod(tensor *out__, tensor self, scalar other) { +C_API int atg_fmod(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::fmod(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -2768,7 +2768,7 @@ int atg_fmod(tensor *out__, tensor self, scalar other) { return 1; } -int atg_fmod1(tensor *out__, tensor self, tensor other) { +C_API int atg_fmod1(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::fmod(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -2777,7 +2777,7 @@ int atg_fmod1(tensor *out__, tensor self, tensor other) { return 1; } -int atg_fmod_(tensor *out__, tensor self, scalar other) { +C_API int atg_fmod_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->fmod_(*other); out__[0] = new torch::Tensor(outputs__); @@ -2786,7 +2786,7 @@ int atg_fmod_(tensor *out__, tensor self, scalar other) { return 1; } -int atg_fmod_1(tensor *out__, tensor self, tensor other) { +C_API int atg_fmod_1(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->fmod_(*other); out__[0] = new torch::Tensor(outputs__); @@ -2795,7 +2795,7 @@ int atg_fmod_1(tensor *out__, tensor self, tensor other) { return 1; } -int atg_fmod_out(tensor *out__, 
tensor out, tensor self, scalar other) { +C_API int atg_fmod_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::fmod_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); @@ -2804,7 +2804,7 @@ int atg_fmod_out(tensor *out__, tensor out, tensor self, scalar other) { return 1; } -int atg_fmod_out1(tensor *out__, tensor out, tensor self, tensor other) { +C_API int atg_fmod_out1(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::fmod_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); @@ -2813,7 +2813,7 @@ int atg_fmod_out1(tensor *out__, tensor out, tensor self, tensor other) { return 1; } -int atg_frac(tensor *out__, tensor self) { +C_API int atg_frac(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::frac(*self); out__[0] = new torch::Tensor(outputs__); @@ -2822,7 +2822,7 @@ int atg_frac(tensor *out__, tensor self) { return 1; } -int atg_frac_(tensor *out__, tensor self) { +C_API int atg_frac_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::frac_(*self); out__[0] = new torch::Tensor(outputs__); @@ -2831,7 +2831,7 @@ int atg_frac_(tensor *out__, tensor self) { return 1; } -int atg_frac_out(tensor *out__, tensor out, tensor self) { +C_API int atg_frac_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::frac_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -2840,7 +2840,7 @@ int atg_frac_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_fractional_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) { +C_API int atg_fractional_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) { PROTECT( auto outputs__ = torch::fractional_max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *random_samples); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -2850,7 +2850,7 @@ int atg_fractional_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_d return 1; } -int atg_fractional_max_pool2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) { +C_API int atg_fractional_max_pool2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) { PROTECT( auto outputs__ = torch::fractional_max_pool2d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *indices); out__[0] = new torch::Tensor(outputs__); @@ -2859,7 +2859,7 @@ int atg_fractional_max_pool2d_backward(tensor *out__, tensor grad_output, tensor return 1; } -int atg_fractional_max_pool2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) { +C_API int atg_fractional_max_pool2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) { PROTECT( auto outputs__ = 
torch::fractional_max_pool2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *indices); out__[0] = new torch::Tensor(outputs__); @@ -2868,7 +2868,7 @@ int atg_fractional_max_pool2d_backward_out(tensor *out__, tensor grad_input, ten return 1; } -int atg_fractional_max_pool2d_out(tensor *out__, tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) { +C_API int atg_fractional_max_pool2d_out(tensor *out__, tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) { PROTECT( auto outputs__ = torch::fractional_max_pool2d_out(*output, *indices, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *random_samples); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -2878,7 +2878,7 @@ int atg_fractional_max_pool2d_out(tensor *out__, tensor output, tensor indices, return 1; } -int atg_fractional_max_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) { +C_API int atg_fractional_max_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) { PROTECT( auto outputs__ = torch::fractional_max_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *random_samples); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -2888,7 +2888,7 @@ int atg_fractional_max_pool3d(tensor *out__, tensor self, int64_t *kernel_size_d return 1; } -int atg_fractional_max_pool3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) { +C_API int atg_fractional_max_pool3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) { PROTECT( auto outputs__ = torch::fractional_max_pool3d_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *indices); out__[0] = new torch::Tensor(outputs__); @@ -2897,7 +2897,7 @@ int atg_fractional_max_pool3d_backward(tensor *out__, tensor grad_output, tensor return 1; } -int atg_fractional_max_pool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) { +C_API int atg_fractional_max_pool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices) { PROTECT( auto outputs__ = torch::fractional_max_pool3d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *indices); out__[0] = new torch::Tensor(outputs__); @@ -2906,7 +2906,7 @@ int atg_fractional_max_pool3d_backward_out(tensor *out__, tensor grad_input, ten return 1; } -int atg_fractional_max_pool3d_out(tensor *out__, 
tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) { +C_API int atg_fractional_max_pool3d_out(tensor *out__, tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples) { PROTECT( auto outputs__ = torch::fractional_max_pool3d_out(*output, *indices, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(output_size_data, output_size_len), *random_samples); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -2916,7 +2916,7 @@ int atg_fractional_max_pool3d_out(tensor *out__, tensor output, tensor indices, return 1; } -int atg_frobenius_norm(tensor *out__, tensor self) { +C_API int atg_frobenius_norm(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::frobenius_norm(*self); out__[0] = new torch::Tensor(outputs__); @@ -2925,7 +2925,7 @@ int atg_frobenius_norm(tensor *out__, tensor self) { return 1; } -int atg_frobenius_norm1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) { +C_API int atg_frobenius_norm1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( auto outputs__ = torch::frobenius_norm(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); @@ -2934,7 +2934,7 @@ int atg_frobenius_norm1(tensor *out__, tensor self, int64_t *dim_data, int dim_l return 1; } -int atg_frobenius_norm_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) { +C_API int atg_frobenius_norm_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( auto outputs__ = torch::frobenius_norm_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); @@ -2943,7 +2943,7 @@ int atg_frobenius_norm_out(tensor *out__, tensor out, tensor self, int64_t *dim_ return 1; } -int atg_full(tensor *out__, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device) { +C_API int atg_full(tensor *out__, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::full(torch::IntArrayRef(size_data, size_len), *fill_value, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -2952,7 +2952,7 @@ int atg_full(tensor *out__, int64_t *size_data, int size_len, scalar fill_value, return 1; } -int atg_full_like(tensor *out__, tensor self, scalar fill_value) { +C_API int atg_full_like(tensor *out__, tensor self, scalar fill_value) { PROTECT( auto outputs__ = torch::full_like(*self, *fill_value); out__[0] = new torch::Tensor(outputs__); @@ -2961,7 +2961,7 @@ int atg_full_like(tensor *out__, tensor self, scalar fill_value) { return 1; } -int atg_full_like1(tensor *out__, tensor self, scalar fill_value, int options_kind, int options_device) { +C_API int atg_full_like1(tensor *out__, tensor self, scalar fill_value, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::full_like(*self, *fill_value, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -2970,7 +2970,7 @@ int atg_full_like1(tensor *out__, tensor self, scalar fill_value, int options_ki return 1; } -int atg_full_out(tensor *out__, tensor out, int64_t 
@@ -2970,7 +2970,7 @@ int atg_full_like1(tensor *out__, tensor self, scalar fill_value, int options_ki
 return 1;
 }

-int atg_full_out(tensor *out__, tensor out, int64_t *size_data, int size_len, scalar fill_value) {
+C_API int atg_full_out(tensor *out__, tensor out, int64_t *size_data, int size_len, scalar fill_value) {
 PROTECT(
 auto outputs__ = torch::full_out(*out, torch::IntArrayRef(size_data, size_len), *fill_value);
 out__[0] = new torch::Tensor(outputs__);
@@ -2979,7 +2979,7 @@ int atg_full_out(tensor *out__, tensor out, int64_t *size_data, int size_len, sc
 return 1;
 }

-int atg_gather(tensor *out__, tensor self, int64_t dim, tensor index, int sparse_grad) {
+C_API int atg_gather(tensor *out__, tensor self, int64_t dim, tensor index, int sparse_grad) {
 PROTECT(
 auto outputs__ = torch::gather(*self, dim, *index, (bool)sparse_grad);
 out__[0] = new torch::Tensor(outputs__);
@@ -2988,7 +2988,7 @@ int atg_gather(tensor *out__, tensor self, int64_t dim, tensor index, int sparse
 return 1;
 }

-int atg_gather_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, int sparse_grad) {
+C_API int atg_gather_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index, int sparse_grad) {
 PROTECT(
 auto outputs__ = torch::gather_out(*out, *self, dim, *index, (bool)sparse_grad);
 out__[0] = new torch::Tensor(outputs__);
@@ -2997,7 +2997,7 @@ int atg_gather_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor i
 return 1;
 }

-int atg_ge(tensor *out__, tensor self, scalar other) {
+C_API int atg_ge(tensor *out__, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = torch::ge(*self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3006,7 +3006,7 @@ int atg_ge(tensor *out__, tensor self, scalar other) {
 return 1;
 }

-int atg_ge1(tensor *out__, tensor self, tensor other) {
+C_API int atg_ge1(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::ge(*self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3015,7 +3015,7 @@ int atg_ge1(tensor *out__, tensor self, tensor other) {
 return 1;
 }

-int atg_ge_(tensor *out__, tensor self, scalar other) {
+C_API int atg_ge_(tensor *out__, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = self->ge_(*other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3024,7 +3024,7 @@ int atg_ge_(tensor *out__, tensor self, scalar other) {
 return 1;
 }

-int atg_ge_1(tensor *out__, tensor self, tensor other) {
+C_API int atg_ge_1(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = self->ge_(*other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3033,7 +3033,7 @@ int atg_ge_1(tensor *out__, tensor self, tensor other) {
 return 1;
 }

-int atg_ge_out(tensor *out__, tensor out, tensor self, scalar other) {
+C_API int atg_ge_out(tensor *out__, tensor out, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = torch::ge_out(*out, *self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3042,7 +3042,7 @@ int atg_ge_out(tensor *out__, tensor out, tensor self, scalar other) {
 return 1;
 }

-int atg_ge_out1(tensor *out__, tensor out, tensor self, tensor other) {
+C_API int atg_ge_out1(tensor *out__, tensor out, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::ge_out(*out, *self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3051,7 +3051,7 @@ int atg_ge_out1(tensor *out__, tensor out, tensor self, tensor other) {
 return 1;
 }

-int atg_gelu(tensor *out__, tensor self) {
+C_API int atg_gelu(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::gelu(*self);
 out__[0] = new torch::Tensor(outputs__);
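The ge family above shows the generator's naming scheme for C++ overloads: the Scalar overload keeps the bare name, a Tensor overload gets a numeric suffix, in-place Tensor methods keep their trailing underscore, and functions writing into an existing tensor append _out. The four representative declarations below are taken verbatim from the hunks above:

C_API int atg_ge(tensor *out__, tensor self, scalar other);   // torch::ge(self, Scalar)
C_API int atg_ge1(tensor *out__, tensor self, tensor other);  // torch::ge(self, Tensor)
C_API int atg_ge_(tensor *out__, tensor self, scalar other);  // self->ge_(Scalar), in place
C_API int atg_ge_out(tensor *out__, tensor out, tensor self, scalar other); // writes into out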
@@ -3060,7 +3060,7 @@ int atg_gelu(tensor *out__, tensor self) {
 return 1;
 }

-int atg_gelu_backward(tensor *out__, tensor grad, tensor self) {
+C_API int atg_gelu_backward(tensor *out__, tensor grad, tensor self) {
 PROTECT(
 auto outputs__ = torch::gelu_backward(*grad, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3069,7 +3069,7 @@ int atg_gelu_backward(tensor *out__, tensor grad, tensor self) {
 return 1;
 }

-int atg_geometric_(tensor *out__, tensor self, double p) {
+C_API int atg_geometric_(tensor *out__, tensor self, double p) {
 PROTECT(
 auto outputs__ = self->geometric_(p);
 out__[0] = new torch::Tensor(outputs__);
@@ -3078,7 +3078,7 @@ int atg_geometric_(tensor *out__, tensor self, double p) {
 return 1;
 }

-int atg_geqrf(tensor *out__, tensor self) {
+C_API int atg_geqrf(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::geqrf(*self);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -3088,7 +3088,7 @@ int atg_geqrf(tensor *out__, tensor self) {
 return 1;
 }

-int atg_geqrf_out(tensor *out__, tensor a, tensor tau, tensor self) {
+C_API int atg_geqrf_out(tensor *out__, tensor a, tensor tau, tensor self) {
 PROTECT(
 auto outputs__ = torch::geqrf_out(*a, *tau, *self);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -3098,7 +3098,7 @@ int atg_geqrf_out(tensor *out__, tensor a, tensor tau, tensor self) {
 return 1;
 }

-int atg_ger(tensor *out__, tensor self, tensor vec2) {
+C_API int atg_ger(tensor *out__, tensor self, tensor vec2) {
 PROTECT(
 auto outputs__ = torch::ger(*self, *vec2);
 out__[0] = new torch::Tensor(outputs__);
@@ -3107,7 +3107,7 @@ int atg_ger(tensor *out__, tensor self, tensor vec2) {
 return 1;
 }

-int atg_ger_out(tensor *out__, tensor out, tensor self, tensor vec2) {
+C_API int atg_ger_out(tensor *out__, tensor out, tensor self, tensor vec2) {
 PROTECT(
 auto outputs__ = torch::ger_out(*out, *self, *vec2);
 out__[0] = new torch::Tensor(outputs__);
@@ -3116,7 +3116,7 @@ int atg_ger_out(tensor *out__, tensor out, tensor self, tensor vec2) {
 return 1;
 }

-int atg_glu(tensor *out__, tensor self, int64_t dim) {
+C_API int atg_glu(tensor *out__, tensor self, int64_t dim) {
 PROTECT(
 auto outputs__ = torch::glu(*self, dim);
 out__[0] = new torch::Tensor(outputs__);
@@ -3125,7 +3125,7 @@ int atg_glu(tensor *out__, tensor self, int64_t dim) {
 return 1;
 }

-int atg_glu_backward(tensor *out__, tensor grad_output, tensor self, int64_t dim) {
+C_API int atg_glu_backward(tensor *out__, tensor grad_output, tensor self, int64_t dim) {
 PROTECT(
 auto outputs__ = torch::glu_backward(*grad_output, *self, dim);
 out__[0] = new torch::Tensor(outputs__);
@@ -3134,7 +3134,7 @@ int atg_glu_backward(tensor *out__, tensor grad_output, tensor self, int64_t dim
 return 1;
 }

-int atg_glu_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t dim) {
+C_API int atg_glu_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t dim) {
 PROTECT(
 auto outputs__ = torch::glu_backward_out(*grad_input, *grad_output, *self, dim);
 out__[0] = new torch::Tensor(outputs__);
@@ -3143,7 +3143,7 @@ int atg_glu_backward_out(tensor *out__, tensor grad_input, tensor grad_output, t
 return 1;
 }

-int atg_glu_out(tensor *out__, tensor out, tensor self, int64_t dim) {
+C_API int atg_glu_out(tensor *out__, tensor out, tensor self, int64_t dim) {
 PROTECT(
 auto outputs__ = torch::glu_out(*out, *self, dim);
 out__[0] = new torch::Tensor(outputs__);
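atg_geqrf above wraps a std::tuple-returning op: std::get<0> lands in out__[0], and the matching std::get<1> store sits on the next line of the file, just outside the three context lines git shows. Callers therefore size out__ to the op's arity. A usage sketch, where `a` is a hypothetical valid tensor handle:

tensor qr[2];                 // geqrf produces two tensors (a, tau)
if (atg_geqrf(qr, a) != 0) {
  // non-zero means an exception was caught inside the wrapper
}
// on success, qr[0] and qr[1] each own a heap-allocated torch::Tensor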
@@ -3152,7 +3152,7 @@ int atg_glu_out(tensor *out__, tensor out, tensor self, int64_t dim) {
 return 1;
 }

-int atg_grad(tensor *out__, tensor self) {
+C_API int atg_grad(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = self->grad();
 out__[0] = new torch::Tensor(outputs__);
@@ -3161,7 +3161,7 @@ int atg_grad(tensor *out__, tensor self) {
 return 1;
 }

-int atg_grid_sampler(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
+C_API int atg_grid_sampler(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
 PROTECT(
 auto outputs__ = torch::grid_sampler(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
 out__[0] = new torch::Tensor(outputs__);
@@ -3170,7 +3170,7 @@ int atg_grid_sampler(tensor *out__, tensor input, tensor grid, int64_t interpola
 return 1;
 }

-int atg_grid_sampler_2d(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
+C_API int atg_grid_sampler_2d(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
 PROTECT(
 auto outputs__ = torch::grid_sampler_2d(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
 out__[0] = new torch::Tensor(outputs__);
@@ -3179,7 +3179,7 @@ int atg_grid_sampler_2d(tensor *out__, tensor input, tensor grid, int64_t interp
 return 1;
 }

-int atg_grid_sampler_2d_backward(tensor *out__, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
+C_API int atg_grid_sampler_2d_backward(tensor *out__, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
 PROTECT(
 auto outputs__ = torch::grid_sampler_2d_backward(*grad_output, *input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -3189,7 +3189,7 @@ int atg_grid_sampler_2d_backward(tensor *out__, tensor grad_output, tensor input
 return 1;
 }

-int atg_grid_sampler_3d(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
+C_API int atg_grid_sampler_3d(tensor *out__, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
 PROTECT(
 auto outputs__ = torch::grid_sampler_3d(*input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
 out__[0] = new torch::Tensor(outputs__);
@@ -3198,7 +3198,7 @@ int atg_grid_sampler_3d(tensor *out__, tensor input, tensor grid, int64_t interp
 return 1;
 }

-int atg_grid_sampler_3d_backward(tensor *out__, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
+C_API int atg_grid_sampler_3d_backward(tensor *out__, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners) {
 PROTECT(
 auto outputs__ = torch::grid_sampler_3d_backward(*grad_output, *input, *grid, interpolation_mode, padding_mode, (bool)align_corners);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -3208,7 +3208,7 @@ int atg_grid_sampler_3d_backward(tensor *out__, tensor grad_output, tensor input
 return 1;
 }

-int atg_group_norm(tensor *out__, tensor input, int64_t num_groups, tensor weight, tensor bias, double eps, int cudnn_enabled) {
+C_API int atg_group_norm(tensor *out__, tensor input, int64_t num_groups, tensor weight, tensor bias, double eps, int cudnn_enabled) {
 PROTECT(
 auto outputs__ = torch::group_norm(*input, num_groups, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), eps, (bool)cudnn_enabled);
 out__[0] = new torch::Tensor(outputs__);
@@ -3217,7 +3217,7 @@ int atg_group_norm(tensor *out__, tensor input, int64_t num_groups, tensor weigh
 return 1;
 }

-int atg_gru(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
+C_API int atg_gru(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
 PROTECT(
 auto outputs__ = torch::gru(*input, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -3227,7 +3227,7 @@ int atg_gru(tensor *out__, tensor input, tensor hx, tensor *params_data, int par
 return 1;
 }

-int atg_gru1(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
+C_API int atg_gru1(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
 PROTECT(
 auto outputs__ = torch::gru(*data, *batch_sizes, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
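The ternaries in atg_group_norm above (and atg_gru_cell just below) encode nullable arguments: a null handle from the caller becomes a default-constructed, undefined torch::Tensor, which libtorch treats as an absent weight or bias. A hypothetical helper expressing the same convention these wrappers inline at every call site:

static inline torch::Tensor or_undefined(tensor t) {
  return t != nullptr ? *t : torch::Tensor();  // null handle -> undefined tensor
}
// e.g. torch::group_norm(*input, num_groups, or_undefined(weight), or_undefined(bias), eps, (bool)cudnn_enabled);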
@@ -3237,7 +3237,7 @@ int atg_gru1(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *
 return 1;
 }

-int atg_gru_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
+C_API int atg_gru_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
 PROTECT(
 auto outputs__ = torch::gru_cell(*input, *hx, *w_ih, *w_hh, (b_ih ? *b_ih : torch::Tensor()), (b_hh ? *b_hh : torch::Tensor()));
 out__[0] = new torch::Tensor(outputs__);
@@ -3246,7 +3246,7 @@ int atg_gru_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_h
 return 1;
 }

-int atg_gt(tensor *out__, tensor self, scalar other) {
+C_API int atg_gt(tensor *out__, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = torch::gt(*self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3255,7 +3255,7 @@ int atg_gt(tensor *out__, tensor self, scalar other) {
 return 1;
 }

-int atg_gt1(tensor *out__, tensor self, tensor other) {
+C_API int atg_gt1(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::gt(*self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3264,7 +3264,7 @@ int atg_gt1(tensor *out__, tensor self, tensor other) {
 return 1;
 }

-int atg_gt_(tensor *out__, tensor self, scalar other) {
+C_API int atg_gt_(tensor *out__, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = self->gt_(*other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3273,7 +3273,7 @@ int atg_gt_(tensor *out__, tensor self, scalar other) {
 return 1;
 }

-int atg_gt_1(tensor *out__, tensor self, tensor other) {
+C_API int atg_gt_1(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = self->gt_(*other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3282,7 +3282,7 @@ int atg_gt_1(tensor *out__, tensor self, tensor other) {
 return 1;
 }

-int atg_gt_out(tensor *out__, tensor out, tensor self, scalar other) {
+C_API int atg_gt_out(tensor *out__, tensor out, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = torch::gt_out(*out, *self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3291,7 +3291,7 @@ int atg_gt_out(tensor *out__, tensor out, tensor self, scalar other) {
 return 1;
 }

-int atg_gt_out1(tensor *out__, tensor out, tensor self, tensor other) {
+C_API int atg_gt_out1(tensor *out__, tensor out, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::gt_out(*out, *self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3300,7 +3300,7 @@ int atg_gt_out1(tensor *out__, tensor out, tensor self, tensor other) {
 return 1;
 }

-int atg_hamming_window(tensor *out__, int64_t window_length, int options_kind, int options_device) {
+C_API int atg_hamming_window(tensor *out__, int64_t window_length, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::hamming_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -3309,7 +3309,7 @@ int atg_hamming_window(tensor *out__, int64_t window_length, int options_kind, i
 return 1;
 }

-int atg_hamming_window1(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) {
+C_API int atg_hamming_window1(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::hamming_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -3318,7 +3318,7 @@ int atg_hamming_window1(tensor *out__, int64_t window_length, int periodic, int
 return 1;
 }

-int atg_hamming_window2(tensor *out__, int64_t window_length, int periodic, double alpha, int options_kind, int options_device) {
+C_API int atg_hamming_window2(tensor *out__, int64_t window_length, int periodic, double alpha, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::hamming_window(window_length, (bool)periodic, alpha, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -3327,7 +3327,7 @@ int atg_hamming_window2(tensor *out__, int64_t window_length, int periodic, doub
 return 1;
 }

-int atg_hamming_window3(tensor *out__, int64_t window_length, int periodic, double alpha, double beta, int options_kind, int options_device) {
+C_API int atg_hamming_window3(tensor *out__, int64_t window_length, int periodic, double alpha, double beta, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::hamming_window(window_length, (bool)periodic, alpha, beta, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -3336,7 +3336,7 @@ int atg_hamming_window3(tensor *out__, int64_t window_length, int periodic, doub
 return 1;
 }

-int atg_hann_window(tensor *out__, int64_t window_length, int options_kind, int options_device) {
+C_API int atg_hann_window(tensor *out__, int64_t window_length, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::hann_window(window_length, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -3345,7 +3345,7 @@ int atg_hann_window(tensor *out__, int64_t window_length, int options_kind, int
 return 1;
 }

-int atg_hann_window1(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) {
+C_API int atg_hann_window1(tensor *out__, int64_t window_length, int periodic, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::hann_window(window_length, (bool)periodic, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -3354,7 +3354,7 @@ int atg_hann_window1(tensor *out__, int64_t window_length, int periodic, int opt
 return 1;
 }

-int atg_hardshrink(tensor *out__, tensor self) {
+C_API int atg_hardshrink(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::hardshrink(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3363,7 +3363,7 @@ int atg_hardshrink(tensor *out__, tensor self) {
 return 1;
 }

-int atg_hardshrink_backward(tensor *out__, tensor grad_out, tensor self, scalar lambd) {
+C_API int atg_hardshrink_backward(tensor *out__, tensor grad_out, tensor self, scalar lambd) {
 PROTECT(
 auto outputs__ = torch::hardshrink_backward(*grad_out, *self, *lambd);
 out__[0] = new torch::Tensor(outputs__);
@@ -3372,7 +3372,7 @@ int atg_hardshrink_backward(tensor *out__, tensor grad_out, tensor self, scalar
 return 1;
 }

-int atg_hardtanh(tensor *out__, tensor self) {
+C_API int atg_hardtanh(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::hardtanh(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3381,7 +3381,7 @@ int atg_hardtanh(tensor *out__, tensor self) {
 return 1;
 }

-int atg_hardtanh_(tensor *out__, tensor self) {
+C_API int atg_hardtanh_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::hardtanh_(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3390,7 +3390,7 @@ int atg_hardtanh_(tensor *out__, tensor self) {
 return 1;
 }

-int atg_hardtanh_backward(tensor *out__, tensor grad_output, tensor self, scalar min_val, scalar max_val) {
+C_API int atg_hardtanh_backward(tensor *out__, tensor grad_output, tensor self, scalar min_val, scalar max_val) {
 PROTECT(
 auto outputs__ = torch::hardtanh_backward(*grad_output, *self, *min_val, *max_val);
 out__[0] = new torch::Tensor(outputs__);
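Every wrapper follows the same control flow: the body runs inside PROTECT( ... return 0; ) and only falls through to the trailing "return 1;" when an exception was caught. PROTECT is defined near the top of torch_api.cpp, outside these hunks; the sketch below is an assumption about its shape (the variable last_error is hypothetical), not the file's actual definition:

// Sketch only: one plausible definition of the error-trapping wrapper.
#define PROTECT(x) \
  try { x } catch (const std::exception &e) { last_error = strdup(e.what()); }

The recorded message is what the library's error-query entry point later copies back to the caller.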
@@ -3399,7 +3399,7 @@ int atg_hardtanh_backward(tensor *out__, tensor grad_output, tensor self, scalar
 return 1;
 }

-int atg_hardtanh_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar min_val, scalar max_val) {
+C_API int atg_hardtanh_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar min_val, scalar max_val) {
 PROTECT(
 auto outputs__ = torch::hardtanh_backward_out(*grad_input, *grad_output, *self, *min_val, *max_val);
 out__[0] = new torch::Tensor(outputs__);
@@ -3408,7 +3408,7 @@ int atg_hardtanh_backward_out(tensor *out__, tensor grad_input, tensor grad_outp
 return 1;
 }

-int atg_hardtanh_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_hardtanh_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::hardtanh_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3417,7 +3417,7 @@ int atg_hardtanh_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }

-int atg_hinge_embedding_loss(tensor *out__, tensor self, tensor target, double margin, int64_t reduction) {
+C_API int atg_hinge_embedding_loss(tensor *out__, tensor self, tensor target, double margin, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::hinge_embedding_loss(*self, *target, margin, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -3426,7 +3426,7 @@ int atg_hinge_embedding_loss(tensor *out__, tensor self, tensor target, double m
 return 1;
 }

-int atg_histc(tensor *out__, tensor self, int64_t bins) {
+C_API int atg_histc(tensor *out__, tensor self, int64_t bins) {
 PROTECT(
 auto outputs__ = torch::histc(*self, bins);
 out__[0] = new torch::Tensor(outputs__);
@@ -3435,7 +3435,7 @@ int atg_histc(tensor *out__, tensor self, int64_t bins) {
 return 1;
 }

-int atg_histc_out(tensor *out__, tensor out, tensor self, int64_t bins) {
+C_API int atg_histc_out(tensor *out__, tensor out, tensor self, int64_t bins) {
 PROTECT(
 auto outputs__ = torch::histc_out(*out, *self, bins);
 out__[0] = new torch::Tensor(outputs__);
@@ -3444,7 +3444,7 @@ int atg_histc_out(tensor *out__, tensor out, tensor self, int64_t bins) {
 return 1;
 }

-int atg_hspmm(tensor *out__, tensor mat1, tensor mat2) {
+C_API int atg_hspmm(tensor *out__, tensor mat1, tensor mat2) {
 PROTECT(
 auto outputs__ = torch::hspmm(*mat1, *mat2);
 out__[0] = new torch::Tensor(outputs__);
@@ -3453,7 +3453,7 @@ int atg_hspmm(tensor *out__, tensor mat1, tensor mat2) {
 return 1;
 }

-int atg_hspmm_out(tensor *out__, tensor out, tensor mat1, tensor mat2) {
+C_API int atg_hspmm_out(tensor *out__, tensor out, tensor mat1, tensor mat2) {
 PROTECT(
 auto outputs__ = torch::hspmm_out(*out, *mat1, *mat2);
 out__[0] = new torch::Tensor(outputs__);
@@ -3462,7 +3462,7 @@ int atg_hspmm_out(tensor *out__, tensor out, tensor mat1, tensor mat2) {
 return 1;
 }

-int atg_ifft(tensor *out__, tensor self, int64_t signal_ndim, int normalized) {
+C_API int atg_ifft(tensor *out__, tensor self, int64_t signal_ndim, int normalized) {
 PROTECT(
 auto outputs__ = torch::ifft(*self, signal_ndim, (bool)normalized);
 out__[0] = new torch::Tensor(outputs__);
@@ -3471,7 +3471,7 @@ int atg_ifft(tensor *out__, tensor self, int64_t signal_ndim, int normalized) {
 return 1;
 }

-int atg_im2col(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
+C_API int atg_im2col(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
 PROTECT(
 auto outputs__ = torch::im2col(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -3480,7 +3480,7 @@ int atg_im2col(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel
 return 1;
 }

-int atg_im2col_backward(tensor *out__, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
+C_API int atg_im2col_backward(tensor *out__, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
 PROTECT(
 auto outputs__ = torch::im2col_backward(*grad_output, torch::IntArrayRef(input_size_data, input_size_len), torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -3489,7 +3489,7 @@ int atg_im2col_backward(tensor *out__, tensor grad_output, int64_t *input_size_d
 return 1;
 }

-int atg_im2col_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
+C_API int atg_im2col_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
 PROTECT(
 auto outputs__ = torch::im2col_backward_out(*grad_input, *grad_output, torch::IntArrayRef(input_size_data, input_size_len), torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -3498,7 +3498,7 @@ int atg_im2col_backward_out(tensor *out__, tensor grad_input, tensor grad_output
 return 1;
 }

-int atg_im2col_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
+C_API int atg_im2col_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len) {
 PROTECT(
 auto outputs__ = torch::im2col_out(*out, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(dilation_data, dilation_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len));
 out__[0] = new torch::Tensor(outputs__);
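All of the int64_t*/length pairs above are wrapped into torch::IntArrayRef, a non-owning view, so callers only pass plain C arrays that must outlive the call. A usage sketch for atg_im2col, assuming 2-element parameters and a hypothetical valid input handle x:

int64_t kernel_size[2] = {3, 3};
int64_t dilation[2] = {1, 1};
int64_t padding[2] = {0, 0};
int64_t stride[2] = {1, 1};
tensor out[1];
atg_im2col(out, x, kernel_size, 2, dilation, 2, padding, 2, stride, 2);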
@@ -3507,7 +3507,7 @@ int atg_im2col_out(tensor *out__, tensor out, tensor self, int64_t *kernel_size_
 return 1;
 }

-int atg_imag(tensor *out__, tensor self) {
+C_API int atg_imag(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::imag(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3516,7 +3516,7 @@ int atg_imag(tensor *out__, tensor self) {
 return 1;
 }

-int atg_imag_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_imag_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::imag_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3525,7 +3525,7 @@ int atg_imag_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }

-int atg_index(tensor *out__, tensor self, tensor *indices_data, int indices_len) {
+C_API int atg_index(tensor *out__, tensor self, tensor *indices_data, int indices_len) {
 PROTECT(
 auto outputs__ = torch::index(*self, of_carray_tensor(indices_data, indices_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -3534,7 +3534,7 @@ int atg_index(tensor *out__, tensor self, tensor *indices_data, int indices_len)
 return 1;
 }

-int atg_index_add(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
+C_API int atg_index_add(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
 PROTECT(
 auto outputs__ = torch::index_add(*self, dim, *index, *source);
 out__[0] = new torch::Tensor(outputs__);
@@ -3543,7 +3543,7 @@ int atg_index_add(tensor *out__, tensor self, int64_t dim, tensor index, tensor
 return 1;
 }

-int atg_index_add_(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
+C_API int atg_index_add_(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
 PROTECT(
 auto outputs__ = self->index_add_(dim, *index, *source);
 out__[0] = new torch::Tensor(outputs__);
@@ -3552,7 +3552,7 @@ int atg_index_add_(tensor *out__, tensor self, int64_t dim, tensor index, tensor
 return 1;
 }

-int atg_index_copy(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
+C_API int atg_index_copy(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
 PROTECT(
 auto outputs__ = torch::index_copy(*self, dim, *index, *source);
 out__[0] = new torch::Tensor(outputs__);
@@ -3561,7 +3561,7 @@ int atg_index_copy(tensor *out__, tensor self, int64_t dim, tensor index, tensor
 return 1;
 }

-int atg_index_copy_(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
+C_API int atg_index_copy_(tensor *out__, tensor self, int64_t dim, tensor index, tensor source) {
 PROTECT(
 auto outputs__ = self->index_copy_(dim, *index, *source);
 out__[0] = new torch::Tensor(outputs__);
@@ -3570,7 +3570,7 @@ int atg_index_copy_(tensor *out__, tensor self, int64_t dim, tensor index, tenso
 return 1;
 }

-int atg_index_fill(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) {
+C_API int atg_index_fill(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) {
 PROTECT(
 auto outputs__ = torch::index_fill(*self, dim, *index, *value);
 out__[0] = new torch::Tensor(outputs__);
@@ -3579,7 +3579,7 @@ int atg_index_fill(tensor *out__, tensor self, int64_t dim, tensor index, scalar
 return 1;
 }

-int atg_index_fill1(tensor *out__, tensor self, int64_t dim, tensor index, tensor value) {
+C_API int atg_index_fill1(tensor *out__, tensor self, int64_t dim, tensor index, tensor value) {
 PROTECT(
 auto outputs__ = torch::index_fill(*self, dim, *index, *value);
 out__[0] = new torch::Tensor(outputs__);
@@ -3588,7 +3588,7 @@ int atg_index_fill1(tensor *out__, tensor self, int64_t dim, tensor index, tenso
 return 1;
 }

-int atg_index_fill_(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) {
+C_API int atg_index_fill_(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) {
 PROTECT(
 auto outputs__ = self->index_fill_(dim, *index, *value);
 out__[0] = new torch::Tensor(outputs__);
@@ -3597,7 +3597,7 @@ int atg_index_fill_(tensor *out__, tensor self, int64_t dim, tensor index, scala
 return 1;
 }

-int atg_index_fill_1(tensor *out__, tensor self, int64_t dim, tensor index, tensor value) {
+C_API int atg_index_fill_1(tensor *out__, tensor self, int64_t dim, tensor index, tensor value) {
 PROTECT(
 auto outputs__ = self->index_fill_(dim, *index, *value);
 out__[0] = new torch::Tensor(outputs__);
@@ -3606,7 +3606,7 @@ int atg_index_fill_1(tensor *out__, tensor self, int64_t dim, tensor index, tens
 return 1;
 }

-int atg_index_put(tensor *out__, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate) {
+C_API int atg_index_put(tensor *out__, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate) {
 PROTECT(
 auto outputs__ = torch::index_put(*self, of_carray_tensor(indices_data, indices_len), *values, (bool)accumulate);
 out__[0] = new torch::Tensor(outputs__);
@@ -3615,7 +3615,7 @@ int atg_index_put(tensor *out__, tensor self, tensor *indices_data, int indices_
 return 1;
 }

-int atg_index_put_(tensor *out__, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate) {
+C_API int atg_index_put_(tensor *out__, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate) {
 PROTECT(
 auto outputs__ = torch::index_put_(*self, of_carray_tensor(indices_data, indices_len), *values, (bool)accumulate);
 out__[0] = new torch::Tensor(outputs__);
@@ -3624,7 +3624,7 @@ int atg_index_put_(tensor *out__, tensor self, tensor *indices_data, int indices
 return 1;
 }

-int atg_index_select(tensor *out__, tensor self, int64_t dim, tensor index) {
+C_API int atg_index_select(tensor *out__, tensor self, int64_t dim, tensor index) {
 PROTECT(
 auto outputs__ = torch::index_select(*self, dim, *index);
 out__[0] = new torch::Tensor(outputs__);
@@ -3633,7 +3633,7 @@ int atg_index_select(tensor *out__, tensor self, int64_t dim, tensor index) {
 return 1;
 }

-int atg_index_select_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index) {
+C_API int atg_index_select_out(tensor *out__, tensor out, tensor self, int64_t dim, tensor index) {
 PROTECT(
 auto outputs__ = torch::index_select_out(*out, *self, dim, *index);
 out__[0] = new torch::Tensor(outputs__);
@@ -3642,7 +3642,7 @@ int atg_index_select_out(tensor *out__, tensor out, tensor self, int64_t dim, te
 return 1;
 }

-int atg_indices(tensor *out__, tensor self) {
+C_API int atg_indices(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = self->indices();
 out__[0] = new torch::Tensor(outputs__);
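Functions that take a tensor*/count pair, such as atg_index and atg_index_put above (and the RNN wrappers elsewhere in this file), copy the referenced handles into a std::vector<torch::Tensor> through of_carray_tensor before calling into libtorch. Caller side, with hypothetical valid handles idx0, idx1 and x:

tensor indices[2] = { idx0, idx1 };
tensor out[1];
atg_index(out, x, indices, 2);  // roughly x.index({*idx0, *idx1})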
@@ -3651,7 +3651,7 @@ int atg_indices(tensor *out__, tensor self) {
 return 1;
 }

-int atg_instance_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int use_input_stats, double momentum, double eps, int cudnn_enabled) {
+C_API int atg_instance_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int use_input_stats, double momentum, double eps, int cudnn_enabled) {
 PROTECT(
 auto outputs__ = torch::instance_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)use_input_stats, momentum, eps, (bool)cudnn_enabled);
 out__[0] = new torch::Tensor(outputs__);
@@ -3660,7 +3660,7 @@ int atg_instance_norm(tensor *out__, tensor input, tensor weight, tensor bias, t
 return 1;
 }

-int atg_int_repr(tensor *out__, tensor self) {
+C_API int atg_int_repr(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::int_repr(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3669,7 +3669,7 @@ int atg_int_repr(tensor *out__, tensor self) {
 return 1;
 }

-int atg_inverse(tensor *out__, tensor self) {
+C_API int atg_inverse(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::inverse(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3678,7 +3678,7 @@ int atg_inverse(tensor *out__, tensor self) {
 return 1;
 }

-int atg_inverse_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_inverse_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::inverse_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3687,7 +3687,7 @@ int atg_inverse_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }

-int atg_irfft(tensor *out__, tensor self, int64_t signal_ndim, int normalized, int onesided, int64_t *signal_sizes_data, int signal_sizes_len) {
+C_API int atg_irfft(tensor *out__, tensor self, int64_t signal_ndim, int normalized, int onesided, int64_t *signal_sizes_data, int signal_sizes_len) {
 PROTECT(
 auto outputs__ = torch::irfft(*self, signal_ndim, (bool)normalized, (bool)onesided, torch::IntArrayRef(signal_sizes_data, signal_sizes_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -3696,7 +3696,7 @@ int atg_irfft(tensor *out__, tensor self, int64_t signal_ndim, int normalized, i
 return 1;
 }

-int atg_isclose(tensor *out__, tensor self, tensor other, double rtol, double atol, int equal_nan) {
+C_API int atg_isclose(tensor *out__, tensor self, tensor other, double rtol, double atol, int equal_nan) {
 PROTECT(
 auto outputs__ = torch::isclose(*self, *other, rtol, atol, (bool)equal_nan);
 out__[0] = new torch::Tensor(outputs__);
@@ -3705,7 +3705,7 @@ int atg_isclose(tensor *out__, tensor self, tensor other, double rtol, double at
 return 1;
 }

-int atg_isfinite(tensor *out__, tensor self) {
+C_API int atg_isfinite(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::isfinite(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3714,7 +3714,7 @@ int atg_isfinite(tensor *out__, tensor self) {
 return 1;
 }

-int atg_isnan(tensor *out__, tensor self) {
+C_API int atg_isnan(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::isnan(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3723,7 +3723,7 @@ int atg_isnan(tensor *out__, tensor self) {
 return 1;
 }

-int atg_kl_div(tensor *out__, tensor self, tensor target, int64_t reduction) {
+C_API int atg_kl_div(tensor *out__, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::kl_div(*self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -3732,7 +3732,7 @@ int atg_kl_div(tensor *out__, tensor self, tensor target, int64_t reduction) {
 return 1;
 }

-int atg_kl_div_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+C_API int atg_kl_div_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::kl_div_backward(*grad_output, *self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -3741,7 +3741,7 @@ int atg_kl_div_backward(tensor *out__, tensor grad_output, tensor self, tensor t
 return 1;
 }

-int atg_kthvalue(tensor *out__, tensor self, int64_t k, int64_t dim, int keepdim) {
+C_API int atg_kthvalue(tensor *out__, tensor self, int64_t k, int64_t dim, int keepdim) {
 PROTECT(
 auto outputs__ = torch::kthvalue(*self, k, dim, (bool)keepdim);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -3751,7 +3751,7 @@ int atg_kthvalue(tensor *out__, tensor self, int64_t k, int64_t dim, int keepdim
 return 1;
 }

-int atg_kthvalue_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int keepdim) {
+C_API int atg_kthvalue_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int keepdim) {
 PROTECT(
 auto outputs__ = torch::kthvalue_out(*values, *indices, *self, k, dim, (bool)keepdim);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -3761,7 +3761,7 @@ int atg_kthvalue_out(tensor *out__, tensor values, tensor indices, tensor self,
 return 1;
 }

-int atg_l1_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
+C_API int atg_l1_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::l1_loss(*self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -3770,7 +3770,7 @@ int atg_l1_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
 return 1;
 }

-int atg_l1_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+C_API int atg_l1_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::l1_loss_backward(*grad_output, *self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -3779,7 +3779,7 @@ int atg_l1_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor
 return 1;
 }

-int atg_l1_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+C_API int atg_l1_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::l1_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -3788,7 +3788,7 @@ int atg_l1_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_outpu
 return 1;
 }

-int atg_l1_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
+C_API int atg_l1_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::l1_loss_out(*out, *self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -3797,7 +3797,7 @@ int atg_l1_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64
 return 1;
 }

-int atg_layer_norm(tensor *out__, tensor input, int64_t *normalized_shape_data, int normalized_shape_len, tensor weight, tensor bias, double eps, int cudnn_enable) {
+C_API int atg_layer_norm(tensor *out__, tensor input, int64_t *normalized_shape_data, int normalized_shape_len, tensor weight, tensor bias, double eps, int cudnn_enable) {
 PROTECT(
 auto outputs__ = torch::layer_norm(*input, torch::IntArrayRef(normalized_shape_data, normalized_shape_len), (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), eps, (bool)cudnn_enable);
 out__[0] = new torch::Tensor(outputs__);
@@ -3806,7 +3806,7 @@ int atg_layer_norm(tensor *out__, tensor input, int64_t *normalized_shape_data,
 return 1;
 }

-int atg_le(tensor *out__, tensor self, scalar other) {
+C_API int atg_le(tensor *out__, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = torch::le(*self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3815,7 +3815,7 @@ int atg_le(tensor *out__, tensor self, scalar other) {
 return 1;
 }

-int atg_le1(tensor *out__, tensor self, tensor other) {
+C_API int atg_le1(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::le(*self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3824,7 +3824,7 @@ int atg_le1(tensor *out__, tensor self, tensor other) {
 return 1;
 }

-int atg_le_(tensor *out__, tensor self, scalar other) {
+C_API int atg_le_(tensor *out__, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = self->le_(*other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3833,7 +3833,7 @@ int atg_le_(tensor *out__, tensor self, scalar other) {
 return 1;
 }

-int atg_le_1(tensor *out__, tensor self, tensor other) {
+C_API int atg_le_1(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = self->le_(*other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3842,7 +3842,7 @@ int atg_le_1(tensor *out__, tensor self, tensor other) {
 return 1;
 }

-int atg_le_out(tensor *out__, tensor out, tensor self, scalar other) {
+C_API int atg_le_out(tensor *out__, tensor out, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = torch::le_out(*out, *self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3851,7 +3851,7 @@ int atg_le_out(tensor *out__, tensor out, tensor self, scalar other) {
 return 1;
 }

-int atg_le_out1(tensor *out__, tensor out, tensor self, tensor other) {
+C_API int atg_le_out1(tensor *out__, tensor out, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::le_out(*out, *self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -3860,7 +3860,7 @@ int atg_le_out1(tensor *out__, tensor out, tensor self, tensor other) {
 return 1;
 }

-int atg_leaky_relu(tensor *out__, tensor self) {
+C_API int atg_leaky_relu(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::leaky_relu(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3869,7 +3869,7 @@ int atg_leaky_relu(tensor *out__, tensor self) {
 return 1;
 }

-int atg_leaky_relu_(tensor *out__, tensor self) {
+C_API int atg_leaky_relu_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::leaky_relu_(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3878,7 +3878,7 @@ int atg_leaky_relu_(tensor *out__, tensor self) {
 return 1;
 }

-int atg_leaky_relu_backward(tensor *out__, tensor grad_output, tensor self, scalar negative_slope) {
+C_API int atg_leaky_relu_backward(tensor *out__, tensor grad_output, tensor self, scalar negative_slope) {
 PROTECT(
 auto outputs__ = torch::leaky_relu_backward(*grad_output, *self, *negative_slope);
 out__[0] = new torch::Tensor(outputs__);
@@ -3887,7 +3887,7 @@ int atg_leaky_relu_backward(tensor *out__, tensor grad_output, tensor self, scal
 return 1;
 }

-int atg_leaky_relu_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar negative_slope) {
+C_API int atg_leaky_relu_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar negative_slope) {
 PROTECT(
 auto outputs__ = torch::leaky_relu_backward_out(*grad_input, *grad_output, *self, *negative_slope);
 out__[0] = new torch::Tensor(outputs__);
@@ -3896,7 +3896,7 @@ int atg_leaky_relu_backward_out(tensor *out__, tensor grad_input, tensor grad_ou
 return 1;
 }

-int atg_leaky_relu_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_leaky_relu_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::leaky_relu_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3905,7 +3905,7 @@ int atg_leaky_relu_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }

-int atg_lerp(tensor *out__, tensor self, tensor end, scalar weight) {
+C_API int atg_lerp(tensor *out__, tensor self, tensor end, scalar weight) {
 PROTECT(
 auto outputs__ = torch::lerp(*self, *end, *weight);
 out__[0] = new torch::Tensor(outputs__);
@@ -3914,7 +3914,7 @@ int atg_lerp(tensor *out__, tensor self, tensor end, scalar weight) {
 return 1;
 }

-int atg_lerp1(tensor *out__, tensor self, tensor end, tensor weight) {
+C_API int atg_lerp1(tensor *out__, tensor self, tensor end, tensor weight) {
 PROTECT(
 auto outputs__ = torch::lerp(*self, *end, *weight);
 out__[0] = new torch::Tensor(outputs__);
@@ -3923,7 +3923,7 @@ int atg_lerp1(tensor *out__, tensor self, tensor end, tensor weight) {
 return 1;
 }

-int atg_lerp_(tensor *out__, tensor self, tensor end, scalar weight) {
+C_API int atg_lerp_(tensor *out__, tensor self, tensor end, scalar weight) {
 PROTECT(
 auto outputs__ = self->lerp_(*end, *weight);
 out__[0] = new torch::Tensor(outputs__);
@@ -3932,7 +3932,7 @@ int atg_lerp_(tensor *out__, tensor self, tensor end, scalar weight) {
 return 1;
 }

-int atg_lerp_1(tensor *out__, tensor self, tensor end, tensor weight) {
+C_API int atg_lerp_1(tensor *out__, tensor self, tensor end, tensor weight) {
 PROTECT(
 auto outputs__ = self->lerp_(*end, *weight);
 out__[0] = new torch::Tensor(outputs__);
@@ -3941,7 +3941,7 @@ int atg_lerp_1(tensor *out__, tensor self, tensor end, tensor weight) {
 return 1;
 }

-int atg_lerp_out(tensor *out__, tensor out, tensor self, tensor end, scalar weight) {
+C_API int atg_lerp_out(tensor *out__, tensor out, tensor self, tensor end, scalar weight) {
 PROTECT(
 auto outputs__ = torch::lerp_out(*out, *self, *end, *weight);
 out__[0] = new torch::Tensor(outputs__);
@@ -3950,7 +3950,7 @@ int atg_lerp_out(tensor *out__, tensor out, tensor self, tensor end, scalar weig
 return 1;
 }

-int atg_lerp_out1(tensor *out__, tensor out, tensor self, tensor end, tensor weight) {
+C_API int atg_lerp_out1(tensor *out__, tensor out, tensor self, tensor end, tensor weight) {
 PROTECT(
 auto outputs__ = torch::lerp_out(*out, *self, *end, *weight);
 out__[0] = new torch::Tensor(outputs__);
@@ -3959,7 +3959,7 @@ int atg_lerp_out1(tensor *out__, tensor out, tensor self, tensor end, tensor wei
 return 1;
 }

-int atg_lgamma(tensor *out__, tensor self) {
+C_API int atg_lgamma(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::lgamma(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3968,7 +3968,7 @@ int atg_lgamma(tensor *out__, tensor self) {
 return 1;
 }

-int atg_lgamma_(tensor *out__, tensor self) {
+C_API int atg_lgamma_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = self->lgamma_();
 out__[0] = new torch::Tensor(outputs__);
@@ -3977,7 +3977,7 @@ int atg_lgamma_(tensor *out__, tensor self) {
 return 1;
 }

-int atg_lgamma_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_lgamma_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::lgamma_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -3986,7 +3986,7 @@ int atg_lgamma_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }

-int atg_linear(tensor *out__, tensor input, tensor weight, tensor bias) {
+C_API int atg_linear(tensor *out__, tensor input, tensor weight, tensor bias) {
 PROTECT(
 auto outputs__ = torch::linear(*input, *weight, (bias ? *bias : torch::Tensor()));
 out__[0] = new torch::Tensor(outputs__);
@@ -3995,7 +3995,7 @@ int atg_linear(tensor *out__, tensor input, tensor weight, tensor bias) {
 return 1;
 }

-int atg_linspace(tensor *out__, scalar start, scalar end, int64_t steps, int options_kind, int options_device) {
+C_API int atg_linspace(tensor *out__, scalar start, scalar end, int64_t steps, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::linspace(*start, *end, steps, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -4004,7 +4004,7 @@ int atg_linspace(tensor *out__, scalar start, scalar end, int64_t steps, int opt
 return 1;
 }

-int atg_linspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps) {
+C_API int atg_linspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps) {
 PROTECT(
 auto outputs__ = torch::linspace_out(*out, *start, *end, steps);
 out__[0] = new torch::Tensor(outputs__);
@@ -4013,7 +4013,7 @@ int atg_linspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_
 return 1;
 }

-int atg_log(tensor *out__, tensor self) {
+C_API int atg_log(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::log(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4022,7 +4022,7 @@ int atg_log(tensor *out__, tensor self) {
 return 1;
 }

-int atg_log10(tensor *out__, tensor self) {
+C_API int atg_log10(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::log10(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4031,7 +4031,7 @@ int atg_log10(tensor *out__, tensor self) {
 return 1;
 }

-int atg_log10_(tensor *out__, tensor self) {
+C_API int atg_log10_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::log10_(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4040,7 +4040,7 @@ int atg_log10_(tensor *out__, tensor self) {
 return 1;
 }

-int atg_log10_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_log10_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::log10_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4049,7 +4049,7 @@ int atg_log10_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }

-int atg_log1p(tensor *out__, tensor self) {
+C_API int atg_log1p(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::log1p(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4058,7 +4058,7 @@ int atg_log1p(tensor *out__, tensor self) {
 return 1;
 }

-int atg_log1p_(tensor *out__, tensor self) {
+C_API int atg_log1p_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::log1p_(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4067,7 +4067,7 @@ int atg_log1p_(tensor *out__, tensor self) {
 return 1;
 }

-int atg_log1p_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_log1p_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::log1p_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4076,7 +4076,7 @@ int atg_log1p_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }

-int atg_log2(tensor *out__, tensor self) {
+C_API int atg_log2(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::log2(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4085,7 +4085,7 @@ int atg_log2(tensor *out__, tensor self) {
 return 1;
 }

-int atg_log2_(tensor *out__, tensor self) {
+C_API int atg_log2_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::log2_(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4094,7 +4094,7 @@ int atg_log2_(tensor *out__, tensor self) {
 return 1;
 }

-int atg_log2_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_log2_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::log2_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4103,7 +4103,7 @@ int atg_log2_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }

-int atg_log_(tensor *out__, tensor self) {
+C_API int atg_log_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::log_(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4112,7 +4112,7 @@ int atg_log_(tensor *out__, tensor self) {
 return 1;
 }

-int atg_log_normal_(tensor *out__, tensor self, double mean, double std) {
+C_API int atg_log_normal_(tensor *out__, tensor self, double mean, double std) {
 PROTECT(
 auto outputs__ = self->log_normal_(mean, std);
 out__[0] = new torch::Tensor(outputs__);
@@ -4121,7 +4121,7 @@ int atg_log_normal_(tensor *out__, tensor self, double mean, double std) {
 return 1;
 }

-int atg_log_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_log_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::log_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4130,7 +4130,7 @@ int atg_log_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }

-int atg_log_sigmoid(tensor *out__, tensor self) {
+C_API int atg_log_sigmoid(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::log_sigmoid(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4139,7 +4139,7 @@ int atg_log_sigmoid(tensor *out__, tensor self) {
 return 1;
 }

-int atg_log_sigmoid_backward(tensor *out__, tensor grad_output, tensor self, tensor buffer) {
+C_API int atg_log_sigmoid_backward(tensor *out__, tensor grad_output, tensor self, tensor buffer) {
 PROTECT(
 auto outputs__ = torch::log_sigmoid_backward(*grad_output, *self, *buffer);
 out__[0] = new torch::Tensor(outputs__);
@@ -4148,7 +4148,7 @@ int atg_log_sigmoid_backward(tensor *out__, tensor grad_output, tensor self, ten
 return 1;
 }

-int atg_log_sigmoid_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor buffer) {
+C_API int atg_log_sigmoid_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor buffer) {
 PROTECT(
 auto outputs__ = torch::log_sigmoid_backward_out(*grad_input, *grad_output, *self, *buffer);
 out__[0] = new torch::Tensor(outputs__);
@@ -4157,7 +4157,7 @@ int atg_log_sigmoid_backward_out(tensor *out__, tensor grad_input, tensor grad_o
 return 1;
 }

-int atg_log_sigmoid_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_log_sigmoid_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::log_sigmoid_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4166,7 +4166,7 @@ int atg_log_sigmoid_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }

-int atg_log_softmax(tensor *out__, tensor self, int64_t dim, int dtype) {
+C_API int atg_log_softmax(tensor *out__, tensor self, int64_t dim, int dtype) {
 PROTECT(
 auto outputs__ = torch::log_softmax(*self, dim, torch::ScalarType(dtype));
 out__[0] = new torch::Tensor(outputs__);
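Putting the conventions together, a minimal C caller of one of these entry points checks the status code and, on failure, retrieves the stored message. get_last_error is declared alongside these bindings; the fixed-size buffer here is an assumption, and the caller must size it generously since the copy is unbounded:

tensor out[1];
if (atg_log(out, x) != 0) {   // x: hypothetical valid tensor handle
  char err[512];
  get_last_error(err);
  fprintf(stderr, "atg_log failed: %s\n", err);
}
// on success, out[0] owns a new torch::Tensor handle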
@@ -4175,7 +4175,7 @@ int atg_log_softmax(tensor *out__, tensor self, int64_t dim, int dtype) {
 return 1;
 }

-int atg_logdet(tensor *out__, tensor self) {
+C_API int atg_logdet(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::logdet(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4184,7 +4184,7 @@ int atg_logdet(tensor *out__, tensor self) {
 return 1;
 }

-int atg_logical_not(tensor *out__, tensor self) {
+C_API int atg_logical_not(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::logical_not(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4193,7 +4193,7 @@ int atg_logical_not(tensor *out__, tensor self) {
 return 1;
 }

-int atg_logical_not_(tensor *out__, tensor self) {
+C_API int atg_logical_not_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = self->logical_not_();
 out__[0] = new torch::Tensor(outputs__);
@@ -4202,7 +4202,7 @@ int atg_logical_not_(tensor *out__, tensor self) {
 return 1;
 }

-int atg_logical_not_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_logical_not_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::logical_not_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -4211,7 +4211,7 @@ int atg_logical_not_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }

-int atg_logical_xor(tensor *out__, tensor self, tensor other) {
+C_API int atg_logical_xor(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::logical_xor(*self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -4220,7 +4220,7 @@ int atg_logical_xor(tensor *out__, tensor self, tensor other) {
 return 1;
 }

-int atg_logical_xor_(tensor *out__, tensor self, tensor other) {
+C_API int atg_logical_xor_(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = self->logical_xor_(*other);
 out__[0] = new torch::Tensor(outputs__);
@@ -4229,7 +4229,7 @@ int atg_logical_xor_(tensor *out__, tensor self, tensor other) {
 return 1;
 }

-int atg_logical_xor_out(tensor *out__, tensor out, tensor self, tensor other) {
+C_API int atg_logical_xor_out(tensor *out__, tensor out, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::logical_xor_out(*out, *self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -4238,7 +4238,7 @@ int atg_logical_xor_out(tensor *out__, tensor out, tensor self, tensor other) {
 return 1;
 }

-int atg_logspace(tensor *out__, scalar start, scalar end, int64_t steps, double base, int options_kind, int options_device) {
+C_API int atg_logspace(tensor *out__, scalar start, scalar end, int64_t steps, double base, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::logspace(*start, *end, steps, base, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -4247,7 +4247,7 @@ int atg_logspace(tensor *out__, scalar start, scalar end, int64_t steps, double
 return 1;
 }

-int atg_logspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps, double base) {
+C_API int atg_logspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_t steps, double base) {
 PROTECT(
 auto outputs__ = torch::logspace_out(*out, *start, *end, steps, base);
 out__[0] = new torch::Tensor(outputs__);
@@ -4256,7 +4256,7 @@ int atg_logspace_out(tensor *out__, tensor out, scalar start, scalar end, int64_
 return 1;
 }

-int atg_logsumexp(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
+C_API int atg_logsumexp(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
 PROTECT(
 auto outputs__ = torch::logsumexp(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
 out__[0] = new torch::Tensor(outputs__);
@@ -4265,7 +4265,7 @@ int atg_logsumexp(tensor *out__, tensor self, int64_t *dim_data, int dim_len, in
 return 1;
 }

-int atg_logsumexp_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
+C_API int atg_logsumexp_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
 PROTECT(
 auto outputs__ = torch::logsumexp_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
 out__[0] = new torch::Tensor(outputs__);
@@ -4274,7 +4274,7 @@ int atg_logsumexp_out(tensor *out__, tensor out, tensor self, int64_t *dim_data,
 return 1;
 }

-int atg_lstm(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
+C_API int atg_lstm(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
 PROTECT(
 auto outputs__ = torch::lstm(*input, of_carray_tensor(hx_data, hx_len), of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -4285,7 +4285,7 @@ int atg_lstm(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor *p
 return 1;
 }

-int atg_lstm1(tensor *out__, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
+C_API int atg_lstm1(tensor *out__, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
 PROTECT(
 auto outputs__ = torch::lstm(*data, *batch_sizes, of_carray_tensor(hx_data, hx_len), of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -4296,7 +4296,7 @@ int atg_lstm1(tensor *out__, tensor data, tensor batch_sizes, tensor *hx_data, i
 return 1;
 }

-int atg_lstm_cell(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
+C_API int atg_lstm_cell(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
 PROTECT(
 auto outputs__ = torch::lstm_cell(*input, of_carray_tensor(hx_data, hx_len), *w_ih, *w_hh, (b_ih ? *b_ih : torch::Tensor()), (b_hh ? *b_hh : torch::Tensor()));
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -4306,7 +4306,7 @@ int atg_lstm_cell(tensor *out__, tensor input, tensor *hx_data, int hx_len, tens
 return 1;
 }

-int atg_lstsq(tensor *out__, tensor self, tensor A) {
+C_API int atg_lstsq(tensor *out__, tensor self, tensor A) {
 PROTECT(
 auto outputs__ = torch::lstsq(*self, *A);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -4316,7 +4316,7 @@ int atg_lstsq(tensor *out__, tensor self, tensor A) {
 return 1;
 }

-int atg_lstsq_out(tensor *out__, tensor X, tensor qr, tensor self, tensor A) {
+C_API int atg_lstsq_out(tensor *out__, tensor X, tensor qr, tensor self, tensor A) {
 PROTECT(
 auto outputs__ = torch::lstsq_out(*X, *qr, *self, *A);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -4326,7 +4326,7 @@ int atg_lstsq_out(tensor *out__, tensor X, tensor qr, tensor self, tensor A) {
 return 1;
 }

-int atg_lt(tensor *out__, tensor self, scalar other) {
+C_API int atg_lt(tensor *out__, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = torch::lt(*self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -4335,7 +4335,7 @@ int atg_lt(tensor *out__, tensor self, scalar other) {
 return 1;
 }

-int atg_lt1(tensor *out__, tensor self, tensor other) {
+C_API int atg_lt1(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::lt(*self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -4344,7 +4344,7 @@ int atg_lt1(tensor *out__, tensor self, tensor other) {
 return 1;
 }

-int atg_lt_(tensor *out__, tensor self, scalar other) {
+C_API int atg_lt_(tensor *out__, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = self->lt_(*other);
 out__[0] = new torch::Tensor(outputs__);
@@ -4353,7 +4353,7 @@ int atg_lt_(tensor *out__, tensor self, scalar other) {
 return 1;
 }

-int atg_lt_1(tensor *out__, tensor self, tensor other) {
+C_API int atg_lt_1(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = self->lt_(*other);
 out__[0] = new torch::Tensor(outputs__);
@@ -4362,7 +4362,7 @@ int atg_lt_1(tensor *out__, tensor self, tensor other) {
 return 1;
 }

-int atg_lt_out(tensor *out__, tensor out, tensor self, scalar other) {
+C_API int atg_lt_out(tensor *out__, tensor out, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = torch::lt_out(*out, *self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -4371,7 +4371,7 @@ int atg_lt_out(tensor *out__, tensor out, tensor self, scalar other) {
 return 1;
 }

-int atg_lt_out1(tensor *out__, tensor out, tensor self, tensor other) {
+C_API int atg_lt_out1(tensor *out__, tensor out, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::lt_out(*out, *self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -4380,7 +4380,7 @@ int atg_lt_out1(tensor *out__, tensor out, tensor self, tensor other) {
 return 1;
 }

-int atg_lu_solve(tensor *out__, tensor self, tensor LU_data, tensor LU_pivots) {
+C_API int atg_lu_solve(tensor *out__, tensor self, tensor LU_data, tensor LU_pivots) {
 PROTECT(
 auto outputs__ = torch::lu_solve(*self, *LU_data, *LU_pivots);
 out__[0] = new torch::Tensor(outputs__);
@@ -4389,7 +4389,7 @@ int atg_lu_solve(tensor *out__, tensor self, tensor LU_data, tensor LU_pivots) {
 return 1;
 }

-int atg_lu_solve_out(tensor *out__, tensor out, tensor self, tensor LU_data, tensor LU_pivots) {
+C_API int atg_lu_solve_out(tensor *out__, tensor out, tensor self, tensor LU_data, tensor LU_pivots) {
 PROTECT(
 auto outputs__ = torch::lu_solve_out(*out, *self, *LU_data, *LU_pivots);
 out__[0] = new torch::Tensor(outputs__);
@@ -4398,7
+4398,7 @@ int atg_lu_solve_out(tensor *out__, tensor out, tensor self, tensor LU_data, ten return 1; } -int atg_margin_ranking_loss(tensor *out__, tensor input1, tensor input2, tensor target, double margin, int64_t reduction) { +C_API int atg_margin_ranking_loss(tensor *out__, tensor input1, tensor input2, tensor target, double margin, int64_t reduction) { PROTECT( auto outputs__ = torch::margin_ranking_loss(*input1, *input2, *target, margin, reduction); out__[0] = new torch::Tensor(outputs__); @@ -4407,7 +4407,7 @@ int atg_margin_ranking_loss(tensor *out__, tensor input1, tensor input2, tensor return 1; } -int atg_masked_fill(tensor *out__, tensor self, tensor mask, scalar value) { +C_API int atg_masked_fill(tensor *out__, tensor self, tensor mask, scalar value) { PROTECT( auto outputs__ = torch::masked_fill(*self, *mask, *value); out__[0] = new torch::Tensor(outputs__); @@ -4416,7 +4416,7 @@ int atg_masked_fill(tensor *out__, tensor self, tensor mask, scalar value) { return 1; } -int atg_masked_fill1(tensor *out__, tensor self, tensor mask, tensor value) { +C_API int atg_masked_fill1(tensor *out__, tensor self, tensor mask, tensor value) { PROTECT( auto outputs__ = torch::masked_fill(*self, *mask, *value); out__[0] = new torch::Tensor(outputs__); @@ -4425,7 +4425,7 @@ int atg_masked_fill1(tensor *out__, tensor self, tensor mask, tensor value) { return 1; } -int atg_masked_fill_(tensor *out__, tensor self, tensor mask, scalar value) { +C_API int atg_masked_fill_(tensor *out__, tensor self, tensor mask, scalar value) { PROTECT( auto outputs__ = self->masked_fill_(*mask, *value); out__[0] = new torch::Tensor(outputs__); @@ -4434,7 +4434,7 @@ int atg_masked_fill_(tensor *out__, tensor self, tensor mask, scalar value) { return 1; } -int atg_masked_fill_1(tensor *out__, tensor self, tensor mask, tensor value) { +C_API int atg_masked_fill_1(tensor *out__, tensor self, tensor mask, tensor value) { PROTECT( auto outputs__ = self->masked_fill_(*mask, *value); out__[0] = new torch::Tensor(outputs__); @@ -4443,7 +4443,7 @@ int atg_masked_fill_1(tensor *out__, tensor self, tensor mask, tensor value) { return 1; } -int atg_masked_scatter(tensor *out__, tensor self, tensor mask, tensor source) { +C_API int atg_masked_scatter(tensor *out__, tensor self, tensor mask, tensor source) { PROTECT( auto outputs__ = torch::masked_scatter(*self, *mask, *source); out__[0] = new torch::Tensor(outputs__); @@ -4452,7 +4452,7 @@ int atg_masked_scatter(tensor *out__, tensor self, tensor mask, tensor source) { return 1; } -int atg_masked_scatter_(tensor *out__, tensor self, tensor mask, tensor source) { +C_API int atg_masked_scatter_(tensor *out__, tensor self, tensor mask, tensor source) { PROTECT( auto outputs__ = self->masked_scatter_(*mask, *source); out__[0] = new torch::Tensor(outputs__); @@ -4461,7 +4461,7 @@ int atg_masked_scatter_(tensor *out__, tensor self, tensor mask, tensor source) return 1; } -int atg_masked_select(tensor *out__, tensor self, tensor mask) { +C_API int atg_masked_select(tensor *out__, tensor self, tensor mask) { PROTECT( auto outputs__ = torch::masked_select(*self, *mask); out__[0] = new torch::Tensor(outputs__); @@ -4470,7 +4470,7 @@ int atg_masked_select(tensor *out__, tensor self, tensor mask) { return 1; } -int atg_masked_select_out(tensor *out__, tensor out, tensor self, tensor mask) { +C_API int atg_masked_select_out(tensor *out__, tensor out, tensor self, tensor mask) { PROTECT( auto outputs__ = torch::masked_select_out(*out, *self, *mask); out__[0] = new torch::Tensor(outputs__); 
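Note on the pattern these hunks apply: every atg_* wrapper here shares one calling convention -- results are written into the out__ array, the return value is 0 on success and 1 when PROTECT catches an exception, and the added C_API prefix marks the symbol for export from the shared library. The definition of C_API itself is not part of these hunks; a minimal sketch of the kind of decorator such a macro typically stands for (an assumption, not the definition used by this patch) is:

  #ifdef _WIN32
  #define C_API __declspec(dllexport)  /* assumed: export the symbol from the DLL on Windows */
  #else
  #define C_API                        /* assumed: no decoration needed on other platforms */
  #endif

And a caller-side sketch of the out-parameter convention, using atg_matmul as declared in the hunk just below (error retrieval through get_last_error is part of the same API):

  #include <stdio.h>
  #include "torch_api.h"  /* declares tensor, atg_matmul, get_last_error */

  void matmul_or_report(tensor a, tensor b) {
    tensor out[1];                     /* the wrapper writes its result pointer here */
    if (atg_matmul(out, a, b) != 0) {  /* 1 means PROTECT caught an exception */
      char err[512];                   /* sketch only: assumes the message fits */
      get_last_error(err);             /* copy out the stored error message */
      fprintf(stderr, "atg_matmul failed: %s\n", err);
    }
  }
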
@@ -4479,7 +4479,7 @@ int atg_masked_select_out(tensor *out__, tensor out, tensor self, tensor mask) { return 1; } -int atg_matmul(tensor *out__, tensor self, tensor other) { +C_API int atg_matmul(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::matmul(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -4488,7 +4488,7 @@ int atg_matmul(tensor *out__, tensor self, tensor other) { return 1; } -int atg_matmul_out(tensor *out__, tensor out, tensor self, tensor other) { +C_API int atg_matmul_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::matmul_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); @@ -4497,7 +4497,7 @@ int atg_matmul_out(tensor *out__, tensor out, tensor self, tensor other) { return 1; } -int atg_matrix_power(tensor *out__, tensor self, int64_t n) { +C_API int atg_matrix_power(tensor *out__, tensor self, int64_t n) { PROTECT( auto outputs__ = torch::matrix_power(*self, n); out__[0] = new torch::Tensor(outputs__); @@ -4506,7 +4506,7 @@ int atg_matrix_power(tensor *out__, tensor self, int64_t n) { return 1; } -int atg_matrix_rank(tensor *out__, tensor self, int symmetric) { +C_API int atg_matrix_rank(tensor *out__, tensor self, int symmetric) { PROTECT( auto outputs__ = torch::matrix_rank(*self, (bool)symmetric); out__[0] = new torch::Tensor(outputs__); @@ -4515,7 +4515,7 @@ int atg_matrix_rank(tensor *out__, tensor self, int symmetric) { return 1; } -int atg_matrix_rank1(tensor *out__, tensor self, double tol, int symmetric) { +C_API int atg_matrix_rank1(tensor *out__, tensor self, double tol, int symmetric) { PROTECT( auto outputs__ = torch::matrix_rank(*self, tol, (bool)symmetric); out__[0] = new torch::Tensor(outputs__); @@ -4524,7 +4524,7 @@ int atg_matrix_rank1(tensor *out__, tensor self, double tol, int symmetric) { return 1; } -int atg_max(tensor *out__, tensor self) { +C_API int atg_max(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::max(*self); out__[0] = new torch::Tensor(outputs__); @@ -4533,7 +4533,7 @@ int atg_max(tensor *out__, tensor self) { return 1; } -int atg_max1(tensor *out__, tensor self, tensor other) { +C_API int atg_max1(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::max(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -4542,7 +4542,7 @@ int atg_max1(tensor *out__, tensor self, tensor other) { return 1; } -int atg_max2(tensor *out__, tensor self, int64_t dim, int keepdim) { +C_API int atg_max2(tensor *out__, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::max(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -4552,7 +4552,7 @@ int atg_max2(tensor *out__, tensor self, int64_t dim, int keepdim) { return 1; } -int atg_max_out(tensor *out__, tensor out, tensor self, tensor other) { +C_API int atg_max_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::max_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); @@ -4561,7 +4561,7 @@ int atg_max_out(tensor *out__, tensor out, tensor self, tensor other) { return 1; } -int atg_max_out1(tensor *out__, tensor max, tensor max_values, tensor self, int64_t dim, int keepdim) { +C_API int atg_max_out1(tensor *out__, tensor max, tensor max_values, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::max_out(*max, *max_values, *self, dim, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -4571,7 +4571,7 @@ int 
atg_max_out1(tensor *out__, tensor max, tensor max_values, tensor self, int6 return 1; } -int atg_max_pool1d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +C_API int atg_max_pool1d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { PROTECT( auto outputs__ = torch::max_pool1d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); out__[0] = new torch::Tensor(outputs__); @@ -4580,7 +4580,7 @@ int atg_max_pool1d(tensor *out__, tensor self, int64_t *kernel_size_data, int ke return 1; } -int atg_max_pool1d_with_indices(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +C_API int atg_max_pool1d_with_indices(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { PROTECT( auto outputs__ = torch::max_pool1d_with_indices(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -4590,7 +4590,7 @@ int atg_max_pool1d_with_indices(tensor *out__, tensor self, int64_t *kernel_size return 1; } -int atg_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +C_API int atg_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { PROTECT( auto outputs__ = torch::max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); out__[0] = new torch::Tensor(outputs__); @@ -4599,7 +4599,7 @@ int atg_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int ke return 1; } -int atg_max_pool2d_with_indices(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +C_API int atg_max_pool2d_with_indices(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { PROTECT( auto outputs__ = torch::max_pool2d_with_indices(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), 
torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -4609,7 +4609,7 @@ int atg_max_pool2d_with_indices(tensor *out__, tensor self, int64_t *kernel_size return 1; } -int atg_max_pool2d_with_indices_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) { +C_API int atg_max_pool2d_with_indices_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) { PROTECT( auto outputs__ = torch::max_pool2d_with_indices_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode, *indices); out__[0] = new torch::Tensor(outputs__); @@ -4618,7 +4618,7 @@ int atg_max_pool2d_with_indices_backward(tensor *out__, tensor grad_output, tens return 1; } -int atg_max_pool2d_with_indices_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) { +C_API int atg_max_pool2d_with_indices_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) { PROTECT( auto outputs__ = torch::max_pool2d_with_indices_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode, *indices); out__[0] = new torch::Tensor(outputs__); @@ -4627,7 +4627,7 @@ int atg_max_pool2d_with_indices_backward_out(tensor *out__, tensor grad_input, t return 1; } -int atg_max_pool2d_with_indices_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +C_API int atg_max_pool2d_with_indices_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { PROTECT( auto outputs__ = torch::max_pool2d_with_indices_out(*out, *indices, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -4637,7 +4637,7 @@ int atg_max_pool2d_with_indices_out(tensor *out__, tensor out, tensor indices, t return 1; } -int atg_max_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t 
*stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +C_API int atg_max_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { PROTECT( auto outputs__ = torch::max_pool3d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); out__[0] = new torch::Tensor(outputs__); @@ -4646,7 +4646,7 @@ int atg_max_pool3d(tensor *out__, tensor self, int64_t *kernel_size_data, int ke return 1; } -int atg_max_pool3d_with_indices(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +C_API int atg_max_pool3d_with_indices(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { PROTECT( auto outputs__ = torch::max_pool3d_with_indices(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -4656,7 +4656,7 @@ int atg_max_pool3d_with_indices(tensor *out__, tensor self, int64_t *kernel_size return 1; } -int atg_max_pool3d_with_indices_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) { +C_API int atg_max_pool3d_with_indices_backward(tensor *out__, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) { PROTECT( auto outputs__ = torch::max_pool3d_with_indices_backward(*grad_output, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode, *indices); out__[0] = new torch::Tensor(outputs__); @@ -4665,7 +4665,7 @@ int atg_max_pool3d_with_indices_backward(tensor *out__, tensor grad_output, tens return 1; } -int atg_max_pool3d_with_indices_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) { +C_API int atg_max_pool3d_with_indices_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices) { PROTECT( auto outputs__ = torch::max_pool3d_with_indices_backward_out(*grad_input, *grad_output, *self, 
torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode, *indices); out__[0] = new torch::Tensor(outputs__); @@ -4674,7 +4674,7 @@ int atg_max_pool3d_with_indices_backward_out(tensor *out__, tensor grad_input, t return 1; } -int atg_max_pool3d_with_indices_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +C_API int atg_max_pool3d_with_indices_out(tensor *out__, tensor out, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { PROTECT( auto outputs__ = torch::max_pool3d_with_indices_out(*out, *indices, *self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -4684,7 +4684,7 @@ int atg_max_pool3d_with_indices_out(tensor *out__, tensor out, tensor indices, t return 1; } -int atg_max_unpool2d(tensor *out__, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) { +C_API int atg_max_unpool2d(tensor *out__, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::max_unpool2d(*self, *indices, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -4693,7 +4693,7 @@ int atg_max_unpool2d(tensor *out__, tensor self, tensor indices, int64_t *output return 1; } -int atg_max_unpool2d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) { +C_API int atg_max_unpool2d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::max_unpool2d_backward(*grad_output, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -4702,7 +4702,7 @@ int atg_max_unpool2d_backward(tensor *out__, tensor grad_output, tensor self, te return 1; } -int atg_max_unpool2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) { +C_API int atg_max_unpool2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::max_unpool2d_backward_out(*grad_input, *grad_output, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -4711,7 +4711,7 @@ int atg_max_unpool2d_backward_out(tensor *out__, tensor grad_input, tensor grad_ return 1; } -int atg_max_unpool2d_out(tensor *out__, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) { +C_API int atg_max_unpool2d_out(tensor *out__, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::max_unpool2d_out(*out, *self, *indices, torch::IntArrayRef(output_size_data, 
output_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -4720,7 +4720,7 @@ int atg_max_unpool2d_out(tensor *out__, tensor out, tensor self, tensor indices, return 1; } -int atg_max_unpool3d(tensor *out__, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { +C_API int atg_max_unpool3d(tensor *out__, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { PROTECT( auto outputs__ = torch::max_unpool3d(*self, *indices, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len)); out__[0] = new torch::Tensor(outputs__); @@ -4729,7 +4729,7 @@ int atg_max_unpool3d(tensor *out__, tensor self, tensor indices, int64_t *output return 1; } -int atg_max_unpool3d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { +C_API int atg_max_unpool3d_backward(tensor *out__, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { PROTECT( auto outputs__ = torch::max_unpool3d_backward(*grad_output, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len)); out__[0] = new torch::Tensor(outputs__); @@ -4738,7 +4738,7 @@ int atg_max_unpool3d_backward(tensor *out__, tensor grad_output, tensor self, te return 1; } -int atg_max_unpool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { +C_API int atg_max_unpool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { PROTECT( auto outputs__ = torch::max_unpool3d_backward_out(*grad_input, *grad_output, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len)); out__[0] = new torch::Tensor(outputs__); @@ -4747,7 +4747,7 @@ int atg_max_unpool3d_backward_out(tensor *out__, tensor grad_input, tensor grad_ return 1; } -int atg_max_unpool3d_out(tensor *out__, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { +C_API int atg_max_unpool3d_out(tensor *out__, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) { PROTECT( auto outputs__ = torch::max_unpool3d_out(*out, *self, *indices, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len)); out__[0] = new torch::Tensor(outputs__); @@ -4756,7 +4756,7 @@ int atg_max_unpool3d_out(tensor *out__, tensor out, tensor self, tensor indices, return 1; } -int atg_max_values(tensor *out__, tensor self, int64_t *dim_data, 
int dim_len, int keepdim) { +C_API int atg_max_values(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( auto outputs__ = torch::max_values(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); @@ -4765,7 +4765,7 @@ int atg_max_values(tensor *out__, tensor self, int64_t *dim_data, int dim_len, i return 1; } -int atg_mean(tensor *out__, tensor self, int dtype) { +C_API int atg_mean(tensor *out__, tensor self, int dtype) { PROTECT( auto outputs__ = torch::mean(*self, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); @@ -4774,7 +4774,7 @@ int atg_mean(tensor *out__, tensor self, int dtype) { return 1; } -int atg_mean1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { +C_API int atg_mean1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( auto outputs__ = torch::mean(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); @@ -4783,7 +4783,7 @@ int atg_mean1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int ke return 1; } -int atg_mean_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { +C_API int atg_mean_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( auto outputs__ = torch::mean_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); @@ -4792,7 +4792,7 @@ int atg_mean_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int return 1; } -int atg_median(tensor *out__, tensor self) { +C_API int atg_median(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::median(*self); out__[0] = new torch::Tensor(outputs__); @@ -4801,7 +4801,7 @@ int atg_median(tensor *out__, tensor self) { return 1; } -int atg_median1(tensor *out__, tensor self, int64_t dim, int keepdim) { +C_API int atg_median1(tensor *out__, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::median(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -4811,7 +4811,7 @@ int atg_median1(tensor *out__, tensor self, int64_t dim, int keepdim) { return 1; } -int atg_median_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim, int keepdim) { +C_API int atg_median_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::median_out(*values, *indices, *self, dim, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -4821,7 +4821,7 @@ int atg_median_out(tensor *out__, tensor values, tensor indices, tensor self, in return 1; } -int atg_meshgrid(tensor *out__, tensor *tensors_data, int tensors_len) { +C_API int atg_meshgrid(tensor *out__, tensor *tensors_data, int tensors_len) { PROTECT( auto outputs__ = torch::meshgrid(of_carray_tensor(tensors_data, tensors_len)); int sz = outputs__.size(); @@ -4835,7 +4835,7 @@ int atg_meshgrid(tensor *out__, tensor *tensors_data, int tensors_len) { return 1; } -int atg_min(tensor *out__, tensor self) { +C_API int atg_min(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::min(*self); out__[0] = new torch::Tensor(outputs__); @@ -4844,7 +4844,7 @@ int atg_min(tensor *out__, tensor self) { return 1; } -int atg_min1(tensor *out__, tensor self, tensor other) { +C_API int 
atg_min1(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::min(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -4853,7 +4853,7 @@ int atg_min1(tensor *out__, tensor self, tensor other) { return 1; } -int atg_min2(tensor *out__, tensor self, int64_t dim, int keepdim) { +C_API int atg_min2(tensor *out__, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::min(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -4863,7 +4863,7 @@ int atg_min2(tensor *out__, tensor self, int64_t dim, int keepdim) { return 1; } -int atg_min_out(tensor *out__, tensor out, tensor self, tensor other) { +C_API int atg_min_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::min_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); @@ -4872,7 +4872,7 @@ int atg_min_out(tensor *out__, tensor out, tensor self, tensor other) { return 1; } -int atg_min_out1(tensor *out__, tensor min, tensor min_indices, tensor self, int64_t dim, int keepdim) { +C_API int atg_min_out1(tensor *out__, tensor min, tensor min_indices, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::min_out(*min, *min_indices, *self, dim, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -4882,7 +4882,7 @@ int atg_min_out1(tensor *out__, tensor min, tensor min_indices, tensor self, int return 1; } -int atg_min_values(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) { +C_API int atg_min_values(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) { PROTECT( auto outputs__ = torch::min_values(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim); out__[0] = new torch::Tensor(outputs__); @@ -4891,7 +4891,7 @@ int atg_min_values(tensor *out__, tensor self, int64_t *dim_data, int dim_len, i return 1; } -int atg_miopen_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon) { +C_API int atg_miopen_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon) { PROTECT( auto outputs__ = torch::miopen_batch_norm(*input, *weight, (bias ? *bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)training, exponential_average_factor, epsilon); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -4902,7 +4902,7 @@ int atg_miopen_batch_norm(tensor *out__, tensor input, tensor weight, tensor bia return 1; } -int atg_miopen_batch_norm_backward(tensor *out__, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon) { +C_API int atg_miopen_batch_norm_backward(tensor *out__, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon) { PROTECT( auto outputs__ = torch::miopen_batch_norm_backward(*input, *grad_output, *weight, (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (save_mean ? *save_mean : torch::Tensor()), (save_var ? 
*save_var : torch::Tensor()), epsilon); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -4913,7 +4913,7 @@ int atg_miopen_batch_norm_backward(tensor *out__, tensor input, tensor grad_outp return 1; } -int atg_miopen_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +C_API int atg_miopen_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::miopen_convolution(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); @@ -4922,7 +4922,7 @@ int atg_miopen_convolution(tensor *out__, tensor self, tensor weight, tensor bia return 1; } -int atg_miopen_convolution_backward_bias(tensor *out__, tensor grad_output) { +C_API int atg_miopen_convolution_backward_bias(tensor *out__, tensor grad_output) { PROTECT( auto outputs__ = torch::miopen_convolution_backward_bias(*grad_output); out__[0] = new torch::Tensor(outputs__); @@ -4931,7 +4931,7 @@ int atg_miopen_convolution_backward_bias(tensor *out__, tensor grad_output) { return 1; } -int atg_miopen_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +C_API int atg_miopen_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::miopen_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); @@ -4940,7 +4940,7 @@ int atg_miopen_convolution_backward_input(tensor *out__, int64_t *self_size_data return 1; } -int atg_miopen_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +C_API int atg_miopen_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::miopen_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), 
torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); @@ -4949,7 +4949,7 @@ int atg_miopen_convolution_backward_weight(tensor *out__, int64_t *weight_size_d return 1; } -int atg_miopen_convolution_transpose(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +C_API int atg_miopen_convolution_transpose(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::miopen_convolution_transpose(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); @@ -4958,7 +4958,7 @@ int atg_miopen_convolution_transpose(tensor *out__, tensor self, tensor weight, return 1; } -int atg_miopen_convolution_transpose_backward_input(tensor *out__, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +C_API int atg_miopen_convolution_transpose_backward_input(tensor *out__, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::miopen_convolution_transpose_backward_input(*grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); @@ -4967,7 +4967,7 @@ int atg_miopen_convolution_transpose_backward_input(tensor *out__, tensor grad_o return 1; } -int atg_miopen_convolution_transpose_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +C_API int atg_miopen_convolution_transpose_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::miopen_convolution_transpose_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); @@ -4976,7 +4976,7 @@ int atg_miopen_convolution_transpose_backward_weight(tensor *out__, int64_t *wei 
return 1; } -int atg_miopen_depthwise_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +C_API int atg_miopen_depthwise_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::miopen_depthwise_convolution(*self, *weight, (bias ? *bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); @@ -4985,7 +4985,7 @@ int atg_miopen_depthwise_convolution(tensor *out__, tensor self, tensor weight, return 1; } -int atg_miopen_depthwise_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +C_API int atg_miopen_depthwise_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::miopen_depthwise_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); @@ -4994,7 +4994,7 @@ int atg_miopen_depthwise_convolution_backward_input(tensor *out__, int64_t *self return 1; } -int atg_miopen_depthwise_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { +C_API int atg_miopen_depthwise_convolution_backward_weight(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic) { PROTECT( auto outputs__ = torch::miopen_depthwise_convolution_backward_weight(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)benchmark, (bool)deterministic); out__[0] = new torch::Tensor(outputs__); @@ -5003,7 +5003,7 @@ int atg_miopen_depthwise_convolution_backward_weight(tensor *out__, int64_t *wei return 1; } -int atg_miopen_rnn(tensor *out__, tensor input, tensor *weight_data, int weight_len, int64_t weight_stride0, tensor hx, tensor cx, int64_t mode, int64_t hidden_size, int64_t num_layers, int batch_first, double dropout, int train, int bidirectional, int64_t 
*batch_sizes_data, int batch_sizes_len, tensor dropout_state) { +C_API int atg_miopen_rnn(tensor *out__, tensor input, tensor *weight_data, int weight_len, int64_t weight_stride0, tensor hx, tensor cx, int64_t mode, int64_t hidden_size, int64_t num_layers, int batch_first, double dropout, int train, int bidirectional, int64_t *batch_sizes_data, int batch_sizes_len, tensor dropout_state) { PROTECT( auto outputs__ = torch::miopen_rnn(*input, of_carray_tensor(weight_data, weight_len), weight_stride0, *hx, (cx ? *cx : torch::Tensor()), mode, hidden_size, num_layers, (bool)batch_first, dropout, (bool)train, (bool)bidirectional, torch::IntArrayRef(batch_sizes_data, batch_sizes_len), (dropout_state ? *dropout_state : torch::Tensor())); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -5016,7 +5016,7 @@ int atg_miopen_rnn(tensor *out__, tensor input, tensor *weight_data, int weight_ return 1; } -int atg_mkldnn_adaptive_avg_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_mkldnn_adaptive_avg_pool2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::mkldnn_adaptive_avg_pool2d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -5025,7 +5025,7 @@ int atg_mkldnn_adaptive_avg_pool2d(tensor *out__, tensor self, int64_t *output_s return 1; } -int atg_mkldnn_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups) { +C_API int atg_mkldnn_convolution(tensor *out__, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( auto outputs__ = torch::mkldnn_convolution(*self, *weight, (bias ? 
*bias : torch::Tensor()), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups); out__[0] = new torch::Tensor(outputs__); @@ -5034,7 +5034,7 @@ int atg_mkldnn_convolution(tensor *out__, tensor self, tensor weight, tensor bia return 1; } -int atg_mkldnn_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined) { +C_API int atg_mkldnn_convolution_backward_input(tensor *out__, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined) { PROTECT( auto outputs__ = torch::mkldnn_convolution_backward_input(torch::IntArrayRef(self_size_data, self_size_len), *grad_output, *weight, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)bias_defined); out__[0] = new torch::Tensor(outputs__); @@ -5043,7 +5043,7 @@ int atg_mkldnn_convolution_backward_input(tensor *out__, int64_t *self_size_data return 1; } -int atg_mkldnn_convolution_backward_weights(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined) { +C_API int atg_mkldnn_convolution_backward_weights(tensor *out__, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined) { PROTECT( auto outputs__ = torch::mkldnn_convolution_backward_weights(torch::IntArrayRef(weight_size_data, weight_size_len), *grad_output, *self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups, (bool)bias_defined); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -5053,7 +5053,7 @@ int atg_mkldnn_convolution_backward_weights(tensor *out__, int64_t *weight_size_ return 1; } -int atg_mkldnn_linear(tensor *out__, tensor input, tensor weight, tensor bias) { +C_API int atg_mkldnn_linear(tensor *out__, tensor input, tensor weight, tensor bias) { PROTECT( auto outputs__ = torch::mkldnn_linear(*input, *weight, (bias ? 
*bias : torch::Tensor())); out__[0] = new torch::Tensor(outputs__); @@ -5062,7 +5062,7 @@ int atg_mkldnn_linear(tensor *out__, tensor input, tensor weight, tensor bias) { return 1; } -int atg_mkldnn_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { +C_API int atg_mkldnn_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) { PROTECT( auto outputs__ = torch::mkldnn_max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode); out__[0] = new torch::Tensor(outputs__); @@ -5071,7 +5071,7 @@ int atg_mkldnn_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, return 1; } -int atg_mkldnn_reorder_conv2d_weight(tensor *out__, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups) { +C_API int atg_mkldnn_reorder_conv2d_weight(tensor *out__, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups) { PROTECT( auto outputs__ = torch::mkldnn_reorder_conv2d_weight(*self, torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(dilation_data, dilation_len), groups); out__[0] = new torch::Tensor(outputs__); @@ -5080,7 +5080,7 @@ int atg_mkldnn_reorder_conv2d_weight(tensor *out__, tensor self, int64_t *paddin return 1; } -int atg_mm(tensor *out__, tensor self, tensor mat2) { +C_API int atg_mm(tensor *out__, tensor self, tensor mat2) { PROTECT( auto outputs__ = torch::mm(*self, *mat2); out__[0] = new torch::Tensor(outputs__); @@ -5089,7 +5089,7 @@ int atg_mm(tensor *out__, tensor self, tensor mat2) { return 1; } -int atg_mm_out(tensor *out__, tensor out, tensor self, tensor mat2) { +C_API int atg_mm_out(tensor *out__, tensor out, tensor self, tensor mat2) { PROTECT( auto outputs__ = torch::mm_out(*out, *self, *mat2); out__[0] = new torch::Tensor(outputs__); @@ -5098,7 +5098,7 @@ int atg_mm_out(tensor *out__, tensor out, tensor self, tensor mat2) { return 1; } -int atg_mode(tensor *out__, tensor self, int64_t dim, int keepdim) { +C_API int atg_mode(tensor *out__, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::mode(*self, dim, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -5108,7 +5108,7 @@ int atg_mode(tensor *out__, tensor self, int64_t dim, int keepdim) { return 1; } -int atg_mode_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim, int keepdim) { +C_API int atg_mode_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim, int keepdim) { PROTECT( auto outputs__ = torch::mode_out(*values, *indices, *self, dim, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -5118,7 +5118,7 @@ int atg_mode_out(tensor *out__, tensor values, tensor indices, tensor self, int6 return 1; } -int atg_mse_loss(tensor *out__, tensor self, tensor target, int64_t reduction) { +C_API int atg_mse_loss(tensor *out__, tensor self, tensor target, int64_t reduction) { 
 PROTECT(
 auto outputs__ = torch::mse_loss(*self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -5127,7 +5127,7 @@ int atg_mse_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
 return 1;
 }
 
-int atg_mse_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+C_API int atg_mse_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::mse_loss_backward(*grad_output, *self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -5136,7 +5136,7 @@ int atg_mse_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor
 return 1;
 }
 
-int atg_mse_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+C_API int atg_mse_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::mse_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -5145,7 +5145,7 @@ int atg_mse_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_outp
 return 1;
 }
 
-int atg_mse_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
+C_API int atg_mse_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::mse_loss_out(*out, *self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -5154,7 +5154,7 @@ int atg_mse_loss_out(tensor *out__, tensor out, tensor self, tensor target, int6
 return 1;
 }
 
-int atg_mul(tensor *out__, tensor self, tensor other) {
+C_API int atg_mul(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::mul(*self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -5163,7 +5163,7 @@ int atg_mul(tensor *out__, tensor self, tensor other) {
 return 1;
 }
 
-int atg_mul1(tensor *out__, tensor self, scalar other) {
+C_API int atg_mul1(tensor *out__, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = torch::mul(*self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -5172,7 +5172,7 @@ int atg_mul1(tensor *out__, tensor self, scalar other) {
 return 1;
 }
 
-int atg_mul_(tensor *out__, tensor self, tensor other) {
+C_API int atg_mul_(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = self->mul_(*other);
 out__[0] = new torch::Tensor(outputs__);
@@ -5181,7 +5181,7 @@ int atg_mul_(tensor *out__, tensor self, tensor other) {
 return 1;
 }
 
-int atg_mul_1(tensor *out__, tensor self, scalar other) {
+C_API int atg_mul_1(tensor *out__, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = self->mul_(*other);
 out__[0] = new torch::Tensor(outputs__);
@@ -5190,7 +5190,7 @@ int atg_mul_1(tensor *out__, tensor self, scalar other) {
 return 1;
 }
 
-int atg_mul_out(tensor *out__, tensor out, tensor self, tensor other) {
+C_API int atg_mul_out(tensor *out__, tensor out, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::mul_out(*out, *self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -5199,7 +5199,7 @@ int atg_mul_out(tensor *out__, tensor out, tensor self, tensor other) {
 return 1;
 }
 
-int atg_multi_margin_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction) {
+C_API int atg_multi_margin_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::multi_margin_loss_backward(*grad_output, *self, *target, *p, *margin, (weight ? *weight : torch::Tensor()), reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -5208,7 +5208,7 @@ int atg_multi_margin_loss_backward(tensor *out__, tensor grad_output, tensor sel
 return 1;
 }
 
-int atg_multi_margin_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction) {
+C_API int atg_multi_margin_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::multi_margin_loss_backward_out(*grad_input, *grad_output, *self, *target, *p, *margin, (weight ? *weight : torch::Tensor()), reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -5217,7 +5217,7 @@ int atg_multi_margin_loss_backward_out(tensor *out__, tensor grad_input, tensor
 return 1;
 }
 
-int atg_multilabel_margin_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
+C_API int atg_multilabel_margin_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::multilabel_margin_loss(*self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -5226,7 +5226,7 @@ int atg_multilabel_margin_loss(tensor *out__, tensor self, tensor target, int64_
 return 1;
 }
 
-int atg_multilabel_margin_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target) {
+C_API int atg_multilabel_margin_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target) {
 PROTECT(
 auto outputs__ = torch::multilabel_margin_loss_backward(*grad_output, *self, *target, reduction, *is_target);
 out__[0] = new torch::Tensor(outputs__);
@@ -5235,7 +5235,7 @@ int atg_multilabel_margin_loss_backward(tensor *out__, tensor grad_output, tenso
 return 1;
 }
 
-int atg_multilabel_margin_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target) {
+C_API int atg_multilabel_margin_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target) {
 PROTECT(
 auto outputs__ = torch::multilabel_margin_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction, *is_target);
 out__[0] = new torch::Tensor(outputs__);
@@ -5244,7 +5244,7 @@ int atg_multilabel_margin_loss_backward_out(tensor *out__, tensor grad_input, te
 return 1;
 }
 
-int atg_multilabel_margin_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
+C_API int atg_multilabel_margin_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::multilabel_margin_loss_out(*out, *self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -5253,7 +5253,7 @@ int atg_multilabel_margin_loss_out(tensor *out__, tensor out, tensor self, tenso
 return 1;
 }
 
-int atg_multinomial(tensor *out__, tensor self, int64_t num_samples, int replacement) {
+C_API int atg_multinomial(tensor *out__, tensor self, int64_t num_samples, int replacement) {
 PROTECT(
 auto outputs__ = torch::multinomial(*self, num_samples, (bool)replacement);
 out__[0] = new torch::Tensor(outputs__);
@@ -5262,7 +5262,7 @@ int atg_multinomial(tensor *out__, tensor self, int64_t num_samples, int replace
 return 1;
 }
 
-int atg_multinomial_out(tensor *out__, tensor out, tensor self, int64_t num_samples, int replacement) {
+C_API int atg_multinomial_out(tensor *out__, tensor out, tensor self, int64_t num_samples, int replacement) {
 PROTECT(
 auto outputs__ = torch::multinomial_out(*out, *self, num_samples, (bool)replacement);
 out__[0] = new torch::Tensor(outputs__);
@@ -5271,7 +5271,7 @@ int atg_multinomial_out(tensor *out__, tensor out, tensor self, int64_t num_samp
 return 1;
 }
 
-int atg_mv(tensor *out__, tensor self, tensor vec) {
+C_API int atg_mv(tensor *out__, tensor self, tensor vec) {
 PROTECT(
 auto outputs__ = torch::mv(*self, *vec);
 out__[0] = new torch::Tensor(outputs__);
@@ -5280,7 +5280,7 @@ int atg_mv(tensor *out__, tensor self, tensor vec) {
 return 1;
 }
 
-int atg_mv_out(tensor *out__, tensor out, tensor self, tensor vec) {
+C_API int atg_mv_out(tensor *out__, tensor out, tensor self, tensor vec) {
 PROTECT(
 auto outputs__ = torch::mv_out(*out, *self, *vec);
 out__[0] = new torch::Tensor(outputs__);
@@ -5289,7 +5289,7 @@ int atg_mv_out(tensor *out__, tensor out, tensor self, tensor vec) {
 return 1;
 }
 
-int atg_mvlgamma(tensor *out__, tensor self, int64_t p) {
+C_API int atg_mvlgamma(tensor *out__, tensor self, int64_t p) {
 PROTECT(
 auto outputs__ = torch::mvlgamma(*self, p);
 out__[0] = new torch::Tensor(outputs__);
@@ -5298,7 +5298,7 @@ int atg_mvlgamma(tensor *out__, tensor self, int64_t p) {
 return 1;
 }
 
-int atg_mvlgamma_(tensor *out__, tensor self, int64_t p) {
+C_API int atg_mvlgamma_(tensor *out__, tensor self, int64_t p) {
 PROTECT(
 auto outputs__ = self->mvlgamma_(p);
 out__[0] = new torch::Tensor(outputs__);
@@ -5307,7 +5307,7 @@ int atg_mvlgamma_(tensor *out__, tensor self, int64_t p) {
 return 1;
 }
 
-int atg_narrow(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t length) {
+C_API int atg_narrow(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t length) {
 PROTECT(
 auto outputs__ = torch::narrow(*self, dim, start, length);
 out__[0] = new torch::Tensor(outputs__);
@@ -5316,7 +5316,7 @@ int atg_narrow(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t l
 return 1;
 }
 
-int atg_narrow_copy(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t length) {
+C_API int atg_narrow_copy(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t length) {
 PROTECT(
 auto outputs__ = self->narrow_copy(dim, start, length);
 out__[0] = new torch::Tensor(outputs__);
@@ -5325,7 +5325,7 @@ int atg_narrow_copy(tensor *out__, tensor self, int64_t dim, int64_t start, int6
 return 1;
 }
 
-int atg_native_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps) {
+C_API int atg_native_batch_norm(tensor *out__, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps) {
 PROTECT(
 auto outputs__ = torch::native_batch_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), (running_mean ? *running_mean : torch::Tensor()), (running_var ? *running_var : torch::Tensor()), (bool)training, momentum, eps);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -5336,7 +5336,7 @@ int atg_native_batch_norm(tensor *out__, tensor input, tensor weight, tensor bia
 return 1;
 }
 
-int atg_native_layer_norm(tensor *out__, tensor input, tensor weight, tensor bias, int64_t M, int64_t n, double eps) {
+C_API int atg_native_layer_norm(tensor *out__, tensor input, tensor weight, tensor bias, int64_t M, int64_t n, double eps) {
 PROTECT(
 auto outputs__ = torch::native_layer_norm(*input, (weight ? *weight : torch::Tensor()), (bias ? *bias : torch::Tensor()), M, n, eps);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -5347,7 +5347,7 @@ int atg_native_layer_norm(tensor *out__, tensor input, tensor weight, tensor bia
 return 1;
 }
 
-int atg_native_norm(tensor *out__, tensor self) {
+C_API int atg_native_norm(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::native_norm(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -5356,7 +5356,7 @@ int atg_native_norm(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_ne(tensor *out__, tensor self, scalar other) {
+C_API int atg_ne(tensor *out__, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = torch::ne(*self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -5365,7 +5365,7 @@ int atg_ne(tensor *out__, tensor self, scalar other) {
 return 1;
 }
 
-int atg_ne1(tensor *out__, tensor self, tensor other) {
+C_API int atg_ne1(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::ne(*self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -5374,7 +5374,7 @@ int atg_ne1(tensor *out__, tensor self, tensor other) {
 return 1;
 }
 
-int atg_ne_(tensor *out__, tensor self, scalar other) {
+C_API int atg_ne_(tensor *out__, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = self->ne_(*other);
 out__[0] = new torch::Tensor(outputs__);
@@ -5383,7 +5383,7 @@ int atg_ne_(tensor *out__, tensor self, scalar other) {
 return 1;
 }
 
-int atg_ne_1(tensor *out__, tensor self, tensor other) {
+C_API int atg_ne_1(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = self->ne_(*other);
 out__[0] = new torch::Tensor(outputs__);
@@ -5392,7 +5392,7 @@ int atg_ne_1(tensor *out__, tensor self, tensor other) {
 return 1;
 }
 
-int atg_ne_out(tensor *out__, tensor out, tensor self, scalar other) {
+C_API int atg_ne_out(tensor *out__, tensor out, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = torch::ne_out(*out, *self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -5401,7 +5401,7 @@ int atg_ne_out(tensor *out__, tensor out, tensor self, scalar other) {
 return 1;
 }
 
-int atg_ne_out1(tensor *out__, tensor out, tensor self, tensor other) {
+C_API int atg_ne_out1(tensor *out__, tensor out, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::ne_out(*out, *self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -5410,7 +5410,7 @@ int atg_ne_out1(tensor *out__, tensor out, tensor self, tensor other) {
 return 1;
 }
 
-int atg_neg(tensor *out__, tensor self) {
+C_API int atg_neg(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::neg(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -5419,7 +5419,7 @@ int atg_neg(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_neg_(tensor *out__, tensor self) {
+C_API int atg_neg_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::neg_(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -5428,7 +5428,7 @@ int atg_neg_(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_neg_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_neg_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::neg_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -5437,7 +5437,7 @@ int atg_neg_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }
 
-int atg_new_empty(tensor *out__, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device) {
+C_API int atg_new_empty(tensor *out__, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = self->new_empty(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -5446,7 +5446,7 @@ int atg_new_empty(tensor *out__, tensor self, int64_t *size_data, int size_len,
 return 1;
 }
 
-int atg_new_full(tensor *out__, tensor self, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device) {
+C_API int atg_new_full(tensor *out__, tensor self, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = self->new_full(torch::IntArrayRef(size_data, size_len), *fill_value, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -5455,7 +5455,7 @@ int atg_new_full(tensor *out__, tensor self, int64_t *size_data, int size_len, s
 return 1;
 }
 
-int atg_new_zeros(tensor *out__, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device) {
+C_API int atg_new_zeros(tensor *out__, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = self->new_zeros(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -5464,7 +5464,7 @@ int atg_new_zeros(tensor *out__, tensor self, int64_t *size_data, int size_len,
 return 1;
 }
 
-int atg_nll_loss(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) {
+C_API int atg_nll_loss(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) {
 PROTECT(
 auto outputs__ = torch::nll_loss(*self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index);
 out__[0] = new torch::Tensor(outputs__);
@@ -5473,7 +5473,7 @@ int atg_nll_loss(tensor *out__, tensor self, tensor target, tensor weight, int64
 return 1;
 }
 
-int atg_nll_loss2d(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) {
+C_API int atg_nll_loss2d(tensor *out__, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) {
 PROTECT(
 auto outputs__ = torch::nll_loss2d(*self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index);
 out__[0] = new torch::Tensor(outputs__);
@@ -5482,7 +5482,7 @@ int atg_nll_loss2d(tensor *out__, tensor self, tensor target, tensor weight, int
 return 1;
 }
 
-int atg_nll_loss2d_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) {
+C_API int atg_nll_loss2d_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) {
 PROTECT(
 auto outputs__ = torch::nll_loss2d_backward(*grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index, *total_weight);
 out__[0] = new torch::Tensor(outputs__);
@@ -5491,7 +5491,7 @@ int atg_nll_loss2d_backward(tensor *out__, tensor grad_output, tensor self, tens
 return 1;
 }
 
-int atg_nll_loss2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) {
+C_API int atg_nll_loss2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) {
 PROTECT(
 auto outputs__ = torch::nll_loss2d_backward_out(*grad_input, *grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index, *total_weight);
 out__[0] = new torch::Tensor(outputs__);
@@ -5500,7 +5500,7 @@ int atg_nll_loss2d_backward_out(tensor *out__, tensor grad_input, tensor grad_ou
 return 1;
 }
 
-int atg_nll_loss2d_out(tensor *out__, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) {
+C_API int atg_nll_loss2d_out(tensor *out__, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) {
 PROTECT(
 auto outputs__ = torch::nll_loss2d_out(*out, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index);
 out__[0] = new torch::Tensor(outputs__);
@@ -5509,7 +5509,7 @@ int atg_nll_loss2d_out(tensor *out__, tensor out, tensor self, tensor target, te
 return 1;
 }
 
-int atg_nll_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) {
+C_API int atg_nll_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) {
 PROTECT(
 auto outputs__ = torch::nll_loss_backward(*grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index, *total_weight);
 out__[0] = new torch::Tensor(outputs__);
@@ -5518,7 +5518,7 @@ int atg_nll_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor
 return 1;
 }
 
-int atg_nll_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) {
+C_API int atg_nll_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight) {
 PROTECT(
 auto outputs__ = torch::nll_loss_backward_out(*grad_input, *grad_output, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index, *total_weight);
 out__[0] = new torch::Tensor(outputs__);
@@ -5527,7 +5527,7 @@ int atg_nll_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_outp
 return 1;
 }
 
-int atg_nll_loss_out(tensor *out__, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) {
+C_API int atg_nll_loss_out(tensor *out__, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index) {
 PROTECT(
 auto outputs__ = torch::nll_loss_out(*out, *self, *target, (weight ? *weight : torch::Tensor()), reduction, ignore_index);
 out__[0] = new torch::Tensor(outputs__);
@@ -5536,7 +5536,7 @@ int atg_nll_loss_out(tensor *out__, tensor out, tensor self, tensor target, tens
 return 1;
 }
 
-int atg_nonzero(tensor *out__, tensor self) {
+C_API int atg_nonzero(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::nonzero(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -5545,7 +5545,7 @@ int atg_nonzero(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_nonzero_numpy(tensor *out__, tensor self) {
+C_API int atg_nonzero_numpy(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::nonzero_numpy(*self);
 int sz = outputs__.size();
@@ -5559,7 +5559,7 @@ int atg_nonzero_numpy(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_nonzero_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_nonzero_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::nonzero_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -5568,7 +5568,7 @@ int atg_nonzero_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }
 
-int atg_norm(tensor *out__, tensor self) {
+C_API int atg_norm(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::norm(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -5577,7 +5577,7 @@ int atg_norm(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_norm1(tensor *out__, tensor self, scalar p, int dtype) {
+C_API int atg_norm1(tensor *out__, tensor self, scalar p, int dtype) {
 PROTECT(
 auto outputs__ = torch::norm(*self, *p, torch::ScalarType(dtype));
 out__[0] = new torch::Tensor(outputs__);
@@ -5586,7 +5586,7 @@ int atg_norm1(tensor *out__, tensor self, scalar p, int dtype) {
 return 1;
 }
 
-int atg_norm2(tensor *out__, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim) {
+C_API int atg_norm2(tensor *out__, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim) {
 PROTECT(
 auto outputs__ = torch::norm(*self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
 out__[0] = new torch::Tensor(outputs__);
@@ -5595,7 +5595,7 @@ int atg_norm2(tensor *out__, tensor self, scalar p, int64_t *dim_data, int dim_l
 return 1;
 }
 
-int atg_norm3(tensor *out__, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
+C_API int atg_norm3(tensor *out__, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
 PROTECT(
 auto outputs__ = torch::norm(*self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype));
 out__[0] = new torch::Tensor(outputs__);
@@ -5604,7 +5604,7 @@ int atg_norm3(tensor *out__, tensor self, scalar p, int64_t *dim_data, int dim_l
 return 1;
 }
 
-int atg_norm_except_dim(tensor *out__, tensor v, int64_t pow, int64_t dim) {
+C_API int atg_norm_except_dim(tensor *out__, tensor v, int64_t pow, int64_t dim) {
 PROTECT(
 auto outputs__ = torch::norm_except_dim(*v, pow, dim);
 out__[0] = new torch::Tensor(outputs__);
@@ -5613,7 +5613,7 @@ int atg_norm_except_dim(tensor *out__, tensor v, int64_t pow, int64_t dim) {
 return 1;
 }
 
-int atg_norm_out(tensor *out__, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim) {
+C_API int atg_norm_out(tensor *out__, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim) {
 PROTECT(
 auto outputs__ = torch::norm_out(*out, *self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
 out__[0] = new torch::Tensor(outputs__);
@@ -5622,7 +5622,7 @@ int atg_norm_out(tensor *out__, tensor out, tensor self, scalar p, int64_t *dim_
 return 1;
 }
 
-int atg_norm_out1(tensor *out__, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
+C_API int atg_norm_out1(tensor *out__, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype) {
 PROTECT(
 auto outputs__ = torch::norm_out(*out, *self, *p, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype));
 out__[0] = new torch::Tensor(outputs__);
@@ -5631,7 +5631,7 @@ int atg_norm_out1(tensor *out__, tensor out, tensor self, scalar p, int64_t *dim
 return 1;
 }
 
-int atg_normal_(tensor *out__, tensor self, double mean, double std) {
+C_API int atg_normal_(tensor *out__, tensor self, double mean, double std) {
 PROTECT(
 auto outputs__ = self->normal_(mean, std);
 out__[0] = new torch::Tensor(outputs__);
@@ -5640,7 +5640,7 @@ int atg_normal_(tensor *out__, tensor self, double mean, double std) {
 return 1;
 }
 
-int atg_normal_out(tensor *out__, tensor out, tensor mean, double std) {
+C_API int atg_normal_out(tensor *out__, tensor out, tensor mean, double std) {
 PROTECT(
 auto outputs__ = torch::normal_out(*out, *mean, std);
 out__[0] = new torch::Tensor(outputs__);
@@ -5649,7 +5649,7 @@ int atg_normal_out(tensor *out__, tensor out, tensor mean, double std) {
 return 1;
 }
 
-int atg_normal_out1(tensor *out__, tensor out, double mean, tensor std) {
+C_API int atg_normal_out1(tensor *out__, tensor out, double mean, tensor std) {
 PROTECT(
 auto outputs__ = torch::normal_out(*out, mean, *std);
 out__[0] = new torch::Tensor(outputs__);
@@ -5658,7 +5658,7 @@ int atg_normal_out1(tensor *out__, tensor out, double mean, tensor std) {
 return 1;
 }
 
-int atg_normal_out2(tensor *out__, tensor out, tensor mean, tensor std) {
+C_API int atg_normal_out2(tensor *out__, tensor out, tensor mean, tensor std) {
 PROTECT(
 auto outputs__ = torch::normal_out(*out, *mean, *std);
 out__[0] = new torch::Tensor(outputs__);
@@ -5667,7 +5667,7 @@ int atg_normal_out2(tensor *out__, tensor out, tensor mean, tensor std) {
 return 1;
 }
 
-int atg_normal_out3(tensor *out__, tensor out, double mean, double std, int64_t *size_data, int size_len) {
+C_API int atg_normal_out3(tensor *out__, tensor out, double mean, double std, int64_t *size_data, int size_len) {
 PROTECT(
 auto outputs__ = torch::normal_out(*out, mean, std, torch::IntArrayRef(size_data, size_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -5676,7 +5676,7 @@ int atg_normal_out3(tensor *out__, tensor out, double mean, double std, int64_t
 return 1;
 }
 
-int atg_nuclear_norm(tensor *out__, tensor self, int keepdim) {
+C_API int atg_nuclear_norm(tensor *out__, tensor self, int keepdim) {
 PROTECT(
 auto outputs__ = torch::nuclear_norm(*self, (bool)keepdim);
 out__[0] = new torch::Tensor(outputs__);
@@ -5685,7 +5685,7 @@ int atg_nuclear_norm(tensor *out__, tensor self, int keepdim) {
 return 1;
 }
 
-int atg_nuclear_norm1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
+C_API int atg_nuclear_norm1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
 PROTECT(
 auto outputs__ = torch::nuclear_norm(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
 out__[0] = new torch::Tensor(outputs__);
@@ -5694,7 +5694,7 @@ int atg_nuclear_norm1(tensor *out__, tensor self, int64_t *dim_data, int dim_len
 return 1;
 }
 
-int atg_nuclear_norm_out(tensor *out__, tensor out, tensor self, int keepdim) {
+C_API int atg_nuclear_norm_out(tensor *out__, tensor out, tensor self, int keepdim) {
 PROTECT(
 auto outputs__ = torch::nuclear_norm_out(*out, *self, (bool)keepdim);
 out__[0] = new torch::Tensor(outputs__);
@@ -5703,7 +5703,7 @@ int atg_nuclear_norm_out(tensor *out__, tensor out, tensor self, int keepdim) {
 return 1;
 }
 
-int atg_nuclear_norm_out1(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
+C_API int atg_nuclear_norm_out1(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim) {
 PROTECT(
 auto outputs__ = torch::nuclear_norm_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim);
 out__[0] = new torch::Tensor(outputs__);
@@ -5712,7 +5712,7 @@ int atg_nuclear_norm_out1(tensor *out__, tensor out, tensor self, int64_t *dim_d
 return 1;
 }
 
-int atg_numpy_t(tensor *out__, tensor self) {
+C_API int atg_numpy_t(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = self->numpy_T();
 out__[0] = new torch::Tensor(outputs__);
@@ -5721,7 +5721,7 @@ int atg_numpy_t(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_one_hot(tensor *out__, tensor self, int64_t num_classes) {
+C_API int atg_one_hot(tensor *out__, tensor self, int64_t num_classes) {
 PROTECT(
 auto outputs__ = torch::one_hot(*self, num_classes);
 out__[0] = new torch::Tensor(outputs__);
@@ -5730,7 +5730,7 @@ int atg_one_hot(tensor *out__, tensor self, int64_t num_classes) {
 return 1;
 }
 
-int atg_ones(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
+C_API int atg_ones(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::ones(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -5739,7 +5739,7 @@ int atg_ones(tensor *out__, int64_t *size_data, int size_len, int options_kind,
 return 1;
 }
 
-int atg_ones_like(tensor *out__, tensor self) {
+C_API int atg_ones_like(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::ones_like(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -5748,7 +5748,7 @@ int atg_ones_like(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_ones_like1(tensor *out__, tensor self, int options_kind, int options_device) {
+C_API int atg_ones_like1(tensor *out__, tensor self, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::ones_like(*self, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -5757,7 +5757,7 @@ int atg_ones_like1(tensor *out__, tensor self, int options_kind, int options_dev
 return 1;
 }
 
-int atg_ones_out(tensor *out__, tensor out, int64_t *size_data, int size_len) {
+C_API int atg_ones_out(tensor *out__, tensor out, int64_t *size_data, int size_len) {
 PROTECT(
 auto outputs__ = torch::ones_out(*out, torch::IntArrayRef(size_data, size_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -5766,7 +5766,7 @@ int atg_ones_out(tensor *out__, tensor out, int64_t *size_data, int size_len) {
 return 1;
 }
 
-int atg_orgqr(tensor *out__, tensor self, tensor input2) {
+C_API int atg_orgqr(tensor *out__, tensor self, tensor input2) {
 PROTECT(
 auto outputs__ = torch::orgqr(*self, *input2);
 out__[0] = new torch::Tensor(outputs__);
@@ -5775,7 +5775,7 @@ int atg_orgqr(tensor *out__, tensor self, tensor input2) {
 return 1;
 }
 
-int atg_orgqr_out(tensor *out__, tensor out, tensor self, tensor input2) {
+C_API int atg_orgqr_out(tensor *out__, tensor out, tensor self, tensor input2) {
 PROTECT(
 auto outputs__ = torch::orgqr_out(*out, *self, *input2);
 out__[0] = new torch::Tensor(outputs__);
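[Aside on the C_API decorator that now prefixes every definition in these hunks: the macro itself is presumably defined once elsewhere in the patch, so its body does not appear here. As a rough, hypothetical sketch (the TORCH_API_EXPORTS guard below is an assumed build-time define, not something this patch specifies), a dll export name decorator of this kind conventionally expands as follows:

#ifdef _WIN32
#  ifdef TORCH_API_EXPORTS   /* assumed: set only while compiling the DLL itself */
#    define C_API __declspec(dllexport)
#  else
#    define C_API __declspec(dllimport)
#  endif
#else
#  define C_API              /* GCC/Clang: default symbol visibility suffices */
#endif

Under a definition of this shape, each atg_* wrapper keeps its C-friendly contract, returning an int status (0 on success, 1 when PROTECT catches an exception) and passing results back through the out__ parameter, while also being resolvable by name from the produced shared library.]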
@@ -5784,7 +5784,7 @@ int atg_orgqr_out(tensor *out__, tensor out, tensor self, tensor input2) {
 return 1;
 }
 
-int atg_ormqr(tensor *out__, tensor self, tensor input2, tensor input3, int left, int transpose) {
+C_API int atg_ormqr(tensor *out__, tensor self, tensor input2, tensor input3, int left, int transpose) {
 PROTECT(
 auto outputs__ = torch::ormqr(*self, *input2, *input3, (bool)left, (bool)transpose);
 out__[0] = new torch::Tensor(outputs__);
@@ -5793,7 +5793,7 @@ int atg_ormqr(tensor *out__, tensor self, tensor input2, tensor input3, int left
 return 1;
 }
 
-int atg_ormqr_out(tensor *out__, tensor out, tensor self, tensor input2, tensor input3, int left, int transpose) {
+C_API int atg_ormqr_out(tensor *out__, tensor out, tensor self, tensor input2, tensor input3, int left, int transpose) {
 PROTECT(
 auto outputs__ = torch::ormqr_out(*out, *self, *input2, *input3, (bool)left, (bool)transpose);
 out__[0] = new torch::Tensor(outputs__);
@@ -5802,7 +5802,7 @@ int atg_ormqr_out(tensor *out__, tensor out, tensor self, tensor input2, tensor
 return 1;
 }
 
-int atg_pairwise_distance(tensor *out__, tensor x1, tensor x2, double p, double eps, int keepdim) {
+C_API int atg_pairwise_distance(tensor *out__, tensor x1, tensor x2, double p, double eps, int keepdim) {
 PROTECT(
 auto outputs__ = torch::pairwise_distance(*x1, *x2, p, eps, (bool)keepdim);
 out__[0] = new torch::Tensor(outputs__);
@@ -5811,7 +5811,7 @@ int atg_pairwise_distance(tensor *out__, tensor x1, tensor x2, double p, double
 return 1;
 }
 
-int atg_pdist(tensor *out__, tensor self, double p) {
+C_API int atg_pdist(tensor *out__, tensor self, double p) {
 PROTECT(
 auto outputs__ = torch::pdist(*self, p);
 out__[0] = new torch::Tensor(outputs__);
@@ -5820,7 +5820,7 @@ int atg_pdist(tensor *out__, tensor self, double p) {
 return 1;
 }
 
-int atg_permute(tensor *out__, tensor self, int64_t *dims_data, int dims_len) {
+C_API int atg_permute(tensor *out__, tensor self, int64_t *dims_data, int dims_len) {
 PROTECT(
 auto outputs__ = self->permute(torch::IntArrayRef(dims_data, dims_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -5829,7 +5829,7 @@ int atg_permute(tensor *out__, tensor self, int64_t *dims_data, int dims_len) {
 return 1;
 }
 
-int atg_pin_memory(tensor *out__, tensor self) {
+C_API int atg_pin_memory(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = self->pin_memory();
 out__[0] = new torch::Tensor(outputs__);
@@ -5838,7 +5838,7 @@ int atg_pin_memory(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_pinverse(tensor *out__, tensor self, double rcond) {
+C_API int atg_pinverse(tensor *out__, tensor self, double rcond) {
 PROTECT(
 auto outputs__ = torch::pinverse(*self, rcond);
 out__[0] = new torch::Tensor(outputs__);
@@ -5847,7 +5847,7 @@ int atg_pinverse(tensor *out__, tensor self, double rcond) {
 return 1;
 }
 
-int atg_pixel_shuffle(tensor *out__, tensor self, int64_t upscale_factor) {
+C_API int atg_pixel_shuffle(tensor *out__, tensor self, int64_t upscale_factor) {
 PROTECT(
 auto outputs__ = torch::pixel_shuffle(*self, upscale_factor);
 out__[0] = new torch::Tensor(outputs__);
@@ -5856,7 +5856,7 @@ int atg_pixel_shuffle(tensor *out__, tensor self, int64_t upscale_factor) {
 return 1;
 }
 
-int atg_poisson(tensor *out__, tensor self) {
+C_API int atg_poisson(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::poisson(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -5865,7 +5865,7 @@ int atg_poisson(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_poisson_nll_loss(tensor *out__, tensor input, tensor target, int log_input, int full, double eps, int64_t reduction) {
+C_API int atg_poisson_nll_loss(tensor *out__, tensor input, tensor target, int log_input, int full, double eps, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::poisson_nll_loss(*input, *target, (bool)log_input, (bool)full, eps, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -5874,7 +5874,7 @@ int atg_poisson_nll_loss(tensor *out__, tensor input, tensor target, int log_inp
 return 1;
 }
 
-int atg_polygamma(tensor *out__, int64_t n, tensor self) {
+C_API int atg_polygamma(tensor *out__, int64_t n, tensor self) {
 PROTECT(
 auto outputs__ = torch::polygamma(n, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -5883,7 +5883,7 @@ int atg_polygamma(tensor *out__, int64_t n, tensor self) {
 return 1;
 }
 
-int atg_polygamma_(tensor *out__, tensor self, int64_t n) {
+C_API int atg_polygamma_(tensor *out__, tensor self, int64_t n) {
 PROTECT(
 auto outputs__ = self->polygamma_(n);
 out__[0] = new torch::Tensor(outputs__);
@@ -5892,7 +5892,7 @@ int atg_polygamma_(tensor *out__, tensor self, int64_t n) {
 return 1;
 }
 
-int atg_polygamma_out(tensor *out__, tensor out, int64_t n, tensor self) {
+C_API int atg_polygamma_out(tensor *out__, tensor out, int64_t n, tensor self) {
 PROTECT(
 auto outputs__ = torch::polygamma_out(*out, n, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -5901,7 +5901,7 @@ int atg_polygamma_out(tensor *out__, tensor out, int64_t n, tensor self) {
 return 1;
 }
 
-int atg_pow(tensor *out__, tensor self, scalar exponent) {
+C_API int atg_pow(tensor *out__, tensor self, scalar exponent) {
 PROTECT(
 auto outputs__ = torch::pow(*self, *exponent);
 out__[0] = new torch::Tensor(outputs__);
@@ -5910,7 +5910,7 @@ int atg_pow(tensor *out__, tensor self, scalar exponent) {
 return 1;
 }
 
-int atg_pow1(tensor *out__, tensor self, tensor exponent) {
+C_API int atg_pow1(tensor *out__, tensor self, tensor exponent) {
 PROTECT(
 auto outputs__ = torch::pow(*self, *exponent);
 out__[0] = new torch::Tensor(outputs__);
@@ -5919,7 +5919,7 @@ int atg_pow1(tensor *out__, tensor self, tensor exponent) {
 return 1;
 }
 
-int atg_pow2(tensor *out__, scalar self, tensor exponent) {
+C_API int atg_pow2(tensor *out__, scalar self, tensor exponent) {
 PROTECT(
 auto outputs__ = torch::pow(*self, *exponent);
 out__[0] = new torch::Tensor(outputs__);
@@ -5928,7 +5928,7 @@ int atg_pow2(tensor *out__, scalar self, tensor exponent) {
 return 1;
 }
 
-int atg_pow_(tensor *out__, tensor self, scalar exponent) {
+C_API int atg_pow_(tensor *out__, tensor self, scalar exponent) {
 PROTECT(
 auto outputs__ = self->pow_(*exponent);
 out__[0] = new torch::Tensor(outputs__);
@@ -5937,7 +5937,7 @@ int atg_pow_(tensor *out__, tensor self, scalar exponent) {
 return 1;
 }
 
-int atg_pow_1(tensor *out__, tensor self, tensor exponent) {
+C_API int atg_pow_1(tensor *out__, tensor self, tensor exponent) {
 PROTECT(
 auto outputs__ = self->pow_(*exponent);
 out__[0] = new torch::Tensor(outputs__);
@@ -5946,7 +5946,7 @@ int atg_pow_1(tensor *out__, tensor self, tensor exponent) {
 return 1;
 }
 
-int atg_pow_out(tensor *out__, tensor out, tensor self, scalar exponent) {
+C_API int atg_pow_out(tensor *out__, tensor out, tensor self, scalar exponent) {
 PROTECT(
 auto outputs__ = torch::pow_out(*out, *self, *exponent);
 out__[0] = new torch::Tensor(outputs__);
@@ -5955,7 +5955,7 @@ int atg_pow_out(tensor *out__, tensor out, tensor self, scalar exponent) {
 return 1;
 }
 
-int atg_pow_out1(tensor *out__, tensor out, tensor self, tensor exponent) {
+C_API int atg_pow_out1(tensor *out__, tensor out, tensor self, tensor exponent) {
 PROTECT(
 auto outputs__ = torch::pow_out(*out, *self, *exponent);
 out__[0] = new torch::Tensor(outputs__);
@@ -5964,7 +5964,7 @@ int atg_pow_out1(tensor *out__, tensor out, tensor self, tensor exponent) {
 return 1;
 }
 
-int atg_pow_out2(tensor *out__, tensor out, scalar self, tensor exponent) {
+C_API int atg_pow_out2(tensor *out__, tensor out, scalar self, tensor exponent) {
 PROTECT(
 auto outputs__ = torch::pow_out(*out, *self, *exponent);
 out__[0] = new torch::Tensor(outputs__);
@@ -5973,7 +5973,7 @@ int atg_pow_out2(tensor *out__, tensor out, scalar self, tensor exponent) {
 return 1;
 }
 
-int atg_prelu(tensor *out__, tensor self, tensor weight) {
+C_API int atg_prelu(tensor *out__, tensor self, tensor weight) {
 PROTECT(
 auto outputs__ = torch::prelu(*self, *weight);
 out__[0] = new torch::Tensor(outputs__);
@@ -5982,7 +5982,7 @@ int atg_prelu(tensor *out__, tensor self, tensor weight) {
 return 1;
 }
 
-int atg_prelu_backward(tensor *out__, tensor grad_output, tensor self, tensor weight) {
+C_API int atg_prelu_backward(tensor *out__, tensor grad_output, tensor self, tensor weight) {
 PROTECT(
 auto outputs__ = torch::prelu_backward(*grad_output, *self, *weight);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -5992,7 +5992,7 @@ int atg_prelu_backward(tensor *out__, tensor grad_output, tensor self, tensor we
 return 1;
 }
 
-int atg_prod(tensor *out__, tensor self, int dtype) {
+C_API int atg_prod(tensor *out__, tensor self, int dtype) {
 PROTECT(
 auto outputs__ = torch::prod(*self, torch::ScalarType(dtype));
 out__[0] = new torch::Tensor(outputs__);
@@ -6001,7 +6001,7 @@ int atg_prod(tensor *out__, tensor self, int dtype) {
 return 1;
 }
 
-int atg_prod1(tensor *out__, tensor self, int64_t dim, int keepdim, int dtype) {
+C_API int atg_prod1(tensor *out__, tensor self, int64_t dim, int keepdim, int dtype) {
 PROTECT(
 auto outputs__ = torch::prod(*self, dim, (bool)keepdim, torch::ScalarType(dtype));
 out__[0] = new torch::Tensor(outputs__);
@@ -6010,7 +6010,7 @@ int atg_prod1(tensor *out__, tensor self, int64_t dim, int keepdim, int dtype) {
 return 1;
 }
 
-int atg_prod_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim, int dtype) {
+C_API int atg_prod_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdim, int dtype) {
 PROTECT(
 auto outputs__ = torch::prod_out(*out, *self, dim, (bool)keepdim, torch::ScalarType(dtype));
 out__[0] = new torch::Tensor(outputs__);
@@ -6019,7 +6019,7 @@ int atg_prod_out(tensor *out__, tensor out, tensor self, int64_t dim, int keepdi
 return 1;
 }
 
-int atg_put_(tensor *out__, tensor self, tensor index, tensor source, int accumulate) {
+C_API int atg_put_(tensor *out__, tensor self, tensor index, tensor source, int accumulate) {
 PROTECT(
 auto outputs__ = self->put_(*index, *source, (bool)accumulate);
 out__[0] = new torch::Tensor(outputs__);
@@ -6028,7 +6028,7 @@ int atg_put_(tensor *out__, tensor self, tensor index, tensor source, int accumu
 return 1;
 }
 
-int atg_q_per_channel_scales(tensor *out__, tensor self) {
+C_API int atg_q_per_channel_scales(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::q_per_channel_scales(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -6037,7 +6037,7 @@ int atg_q_per_channel_scales(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_q_per_channel_zero_points(tensor *out__, tensor self) {
+C_API int atg_q_per_channel_zero_points(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::q_per_channel_zero_points(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -6046,7 +6046,7 @@ int atg_q_per_channel_zero_points(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_qr(tensor *out__, tensor self, int some) {
+C_API int atg_qr(tensor *out__, tensor self, int some) {
 PROTECT(
 auto outputs__ = torch::qr(*self, (bool)some);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -6056,7 +6056,7 @@ int atg_qr(tensor *out__, tensor self, int some) {
 return 1;
 }
 
-int atg_qr_out(tensor *out__, tensor Q, tensor R, tensor self, int some) {
+C_API int atg_qr_out(tensor *out__, tensor Q, tensor R, tensor self, int some) {
 PROTECT(
 auto outputs__ = torch::qr_out(*Q, *R, *self, (bool)some);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -6066,7 +6066,7 @@ int atg_qr_out(tensor *out__, tensor Q, tensor R, tensor self, int some) {
 return 1;
 }
 
-int atg_quantize_per_channel(tensor *out__, tensor self, tensor scales, tensor zero_points, int64_t axis, int dtype) {
+C_API int atg_quantize_per_channel(tensor *out__, tensor self, tensor scales, tensor zero_points, int64_t axis, int dtype) {
 PROTECT(
 auto outputs__ = torch::quantize_per_channel(*self, *scales, *zero_points, axis, torch::ScalarType(dtype));
 out__[0] = new torch::Tensor(outputs__);
@@ -6075,7 +6075,7 @@ int atg_quantize_per_channel(tensor *out__, tensor self, tensor scales, tensor z
 return 1;
 }
 
-int atg_quantize_per_tensor(tensor *out__, tensor self, double scale, int64_t zero_point, int dtype) {
+C_API int atg_quantize_per_tensor(tensor *out__, tensor self, double scale, int64_t zero_point, int dtype) {
 PROTECT(
 auto outputs__ = torch::quantize_per_tensor(*self, scale, zero_point, torch::ScalarType(dtype));
 out__[0] = new torch::Tensor(outputs__);
@@ -6084,7 +6084,7 @@ int atg_quantize_per_tensor(tensor *out__, tensor self, double scale, int64_t ze
 return 1;
 }
 
-int atg_quantized_gru(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
+C_API int atg_quantized_gru(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
 PROTECT(
 auto outputs__ = torch::quantized_gru(*input, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -6094,7 +6094,7 @@ int atg_quantized_gru(tensor *out__, tensor input, tensor hx, tensor *params_dat
 return 1;
 }
 
-int atg_quantized_gru1(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
+C_API int atg_quantized_gru1(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
 PROTECT(
 auto outputs__ = torch::quantized_gru(*data, *batch_sizes, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -6104,7 +6104,7 @@ int atg_quantized_gru1(tensor *out__, tensor data, tensor batch_sizes, tensor hx
 return 1;
 }
 
-int atg_quantized_gru_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) {
+C_API int atg_quantized_gru_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) {
 PROTECT(
 auto outputs__ = torch::quantized_gru_cell(*input, *hx, *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh);
 out__[0] = new torch::Tensor(outputs__);
@@ -6113,7 +6113,7 @@ int atg_quantized_gru_cell(tensor *out__, tensor input, tensor hx, tensor w_ih,
 return 1;
 }
 
-int atg_quantized_lstm(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first, int dtype, int use_dynamic) {
+C_API int atg_quantized_lstm(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first, int dtype, int use_dynamic) {
 PROTECT(
 auto outputs__ = torch::quantized_lstm(*input, of_carray_tensor(hx_data, hx_len), of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first, torch::ScalarType(dtype), (bool)use_dynamic);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -6124,7 +6124,7 @@ int atg_quantized_lstm(tensor *out__, tensor input, tensor *hx_data, int hx_len,
 return 1;
 }
 
-int atg_quantized_lstm1(tensor *out__, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int dtype, int use_dynamic) {
+C_API int atg_quantized_lstm1(tensor *out__, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int dtype, int use_dynamic) {
 PROTECT(
 auto outputs__ = torch::quantized_lstm(*data, *batch_sizes, of_carray_tensor(hx_data, hx_len), of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, torch::ScalarType(dtype), (bool)use_dynamic);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -6135,7 +6135,7 @@ int atg_quantized_lstm1(tensor *out__, tensor data, tensor batch_sizes, tensor *
 return 1;
 }
 
-int atg_quantized_lstm_cell(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) {
+C_API int atg_quantized_lstm_cell(tensor *out__, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) {
 PROTECT(
 auto outputs__ = torch::quantized_lstm_cell(*input, of_carray_tensor(hx_data, hx_len), *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -6145,7 +6145,7 @@ int atg_quantized_lstm_cell(tensor *out__, tensor input, tensor *hx_data, int hx
 return 1;
 }
 
-int atg_quantized_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
+C_API int atg_quantized_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode) {
 PROTECT(
 auto outputs__ = torch::quantized_max_pool2d(*self, torch::IntArrayRef(kernel_size_data, kernel_size_len), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len), (bool)ceil_mode);
 out__[0] = new torch::Tensor(outputs__);
@@ -6154,7 +6154,7 @@ int atg_quantized_max_pool2d(tensor *out__, tensor self, int64_t *kernel_size_da
 return 1;
 }
 
-int atg_quantized_rnn_relu_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) {
+C_API int atg_quantized_rnn_relu_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) {
 PROTECT(
 auto outputs__ = torch::quantized_rnn_relu_cell(*input, *hx, *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh);
 out__[0] = new torch::Tensor(outputs__);
@@ -6163,7 +6163,7 @@ int atg_quantized_rnn_relu_cell(tensor *out__, tensor input, tensor hx, tensor w
 return 1;
 }
 
-int atg_quantized_rnn_tanh_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) {
+C_API int atg_quantized_rnn_tanh_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh) {
 PROTECT(
 auto outputs__ = torch::quantized_rnn_tanh_cell(*input, *hx, *w_ih, *w_hh, *b_ih, *b_hh, *packed_ih, *packed_hh, *col_offsets_ih, *col_offsets_hh, *scale_ih, *scale_hh, *zero_point_ih, *zero_point_hh);
 out__[0] = new torch::Tensor(outputs__);
@@ -6172,7 +6172,7 @@ int atg_quantized_rnn_tanh_cell(tensor *out__, tensor input, tensor hx, tensor w
 return 1;
 }
 
-int atg_rand(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
+C_API int atg_rand(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::rand(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -6181,7 +6181,7 @@ int atg_rand(tensor *out__, int64_t *size_data, int size_len, int options_kind,
 return 1;
 }
 
-int atg_rand_like(tensor *out__, tensor self) {
+C_API int atg_rand_like(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::rand_like(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -6190,7 +6190,7 @@ int atg_rand_like(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_rand_like1(tensor *out__, tensor self, int options_kind, int options_device) {
+C_API int atg_rand_like1(tensor *out__, tensor self, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::rand_like(*self, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -6199,7 +6199,7 @@ int atg_rand_like1(tensor *out__, tensor self, int options_kind, int options_dev
 return 1;
 }
 
-int atg_rand_out(tensor *out__, tensor out, int64_t *size_data, int size_len) {
+C_API int atg_rand_out(tensor *out__, tensor out, int64_t *size_data, int size_len) {
 PROTECT(
 auto outputs__ = torch::rand_out(*out, torch::IntArrayRef(size_data, size_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6208,7 +6208,7 @@ int atg_rand_out(tensor *out__, tensor out, int64_t *size_data, int size_len) {
 return 1;
 }
 
-int atg_randint(tensor *out__, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device) {
+C_API int atg_randint(tensor *out__, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::randint(high, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -6217,7 +6217,7 @@ int atg_randint(tensor *out__, int64_t high, int64_t *size_data, int size_len, i
 return 1;
 }
 
-int atg_randint1(tensor *out__, int64_t low, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device) {
+C_API int atg_randint1(tensor *out__, int64_t low, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::randint(low, high, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -6226,7 +6226,7 @@ int atg_randint1(tensor *out__, int64_t low, int64_t high, int64_t *size_data, i
 return 1;
 }
 
-int atg_randint_like(tensor *out__, tensor self, int64_t high) {
+C_API int atg_randint_like(tensor *out__, tensor self, int64_t high) {
 PROTECT(
 auto outputs__ = torch::randint_like(*self, high);
 out__[0] = new torch::Tensor(outputs__);
@@ -6235,7 +6235,7 @@ int atg_randint_like(tensor *out__, tensor self, int64_t high) {
 return 1;
 }
 
-int atg_randint_like1(tensor *out__, tensor self, int64_t low, int64_t high) {
+C_API int atg_randint_like1(tensor *out__, tensor self, int64_t low, int64_t high) {
 PROTECT(
 auto outputs__ = torch::randint_like(*self, low, high);
 out__[0] = new torch::Tensor(outputs__);
@@ -6244,7 +6244,7 @@ int atg_randint_like1(tensor *out__, tensor self, int64_t low, int64_t high) {
 return 1;
 }
 
-int atg_randint_like2(tensor *out__, tensor self, int64_t high, int options_kind, int options_device) {
+C_API int atg_randint_like2(tensor *out__, tensor self, int64_t high, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::randint_like(*self, high, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -6253,7 +6253,7 @@ int atg_randint_like2(tensor *out__, tensor self, int64_t high, int options_kind
 return 1;
 }
 
-int atg_randint_like3(tensor *out__, tensor self, int64_t low, int64_t high, int options_kind, int options_device) {
+C_API int atg_randint_like3(tensor *out__, tensor self, int64_t low, int64_t high, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::randint_like(*self, low, high, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -6262,7 +6262,7 @@ int atg_randint_like3(tensor *out__, tensor self, int64_t low, int64_t high, int
 return 1;
 }
 
-int atg_randint_out(tensor *out__, tensor out, int64_t high, int64_t *size_data, int size_len) {
+C_API int atg_randint_out(tensor *out__, tensor out, int64_t high, int64_t *size_data, int size_len) {
 PROTECT(
 auto outputs__ = torch::randint_out(*out, high, torch::IntArrayRef(size_data, size_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6271,7 +6271,7 @@ int atg_randint_out(tensor *out__, tensor out, int64_t high, int64_t *size_data,
 return 1;
 }
 
-int atg_randint_out1(tensor *out__, tensor out, int64_t low, int64_t high, int64_t *size_data, int size_len) {
+C_API int atg_randint_out1(tensor *out__, tensor out, int64_t low, int64_t high, int64_t *size_data, int size_len) {
 PROTECT(
 auto outputs__ = torch::randint_out(*out, low, high, torch::IntArrayRef(size_data, size_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6280,7 +6280,7 @@ int atg_randint_out1(tensor *out__, tensor out, int64_t low, int64_t high, int64
 return 1;
 }
 
-int atg_randn(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
+C_API int atg_randn(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::randn(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -6289,7 +6289,7 @@ int atg_randn(tensor *out__, int64_t *size_data, int size_len, int options_kind,
 return 1;
 }
 
-int atg_randn_like(tensor *out__, tensor self) {
+C_API int atg_randn_like(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::randn_like(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -6298,7 +6298,7 @@ int atg_randn_like(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_randn_like1(tensor *out__, tensor self, int options_kind, int options_device) {
+C_API int atg_randn_like1(tensor *out__, tensor self, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::randn_like(*self, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -6307,7 +6307,7 @@ int atg_randn_like1(tensor *out__, tensor self, int options_kind, int options_de
 return 1;
 }
 
-int atg_randn_out(tensor *out__, tensor out, int64_t *size_data, int size_len) {
+C_API int atg_randn_out(tensor *out__, tensor out, int64_t *size_data, int size_len) {
 PROTECT(
 auto outputs__ = torch::randn_out(*out, torch::IntArrayRef(size_data, size_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6316,7 +6316,7 @@ int atg_randn_out(tensor *out__, tensor out, int64_t *size_data, int size_len) {
 return 1;
 }
 
-int atg_random_(tensor *out__, tensor self) {
+C_API int atg_random_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = self->random_();
 out__[0] = new torch::Tensor(outputs__);
@@ -6325,7 +6325,7 @@ int atg_random_(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_random_1(tensor *out__, tensor self, int64_t to) {
+C_API int atg_random_1(tensor *out__, tensor self, int64_t to) {
 PROTECT(
 auto outputs__ = self->random_(to);
 out__[0] = new torch::Tensor(outputs__);
@@ -6334,7 +6334,7 @@ int atg_random_1(tensor *out__, tensor self, int64_t to) {
 return 1;
 }
 
-int atg_random_2(tensor *out__, tensor self, int64_t from, int64_t to) {
+C_API int atg_random_2(tensor *out__, tensor self, int64_t from, int64_t to) {
 PROTECT(
 auto outputs__ = self->random_(from, to);
 out__[0] = new torch::Tensor(outputs__);
@@ -6343,7 +6343,7 @@ int atg_random_2(tensor *out__, tensor self, int64_t from, int64_t to) {
 return 1;
 }
 
-int atg_randperm(tensor *out__, int64_t n, int options_kind, int options_device) {
+C_API int atg_randperm(tensor *out__, int64_t n, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::randperm(n, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -6352,7 +6352,7 @@ int atg_randperm(tensor *out__, int64_t n, int options_kind, int options_device)
 return 1;
 }
 
-int atg_randperm_out(tensor *out__, tensor out, int64_t n) {
+C_API int atg_randperm_out(tensor *out__, tensor out, int64_t n) {
 PROTECT(
 auto outputs__ = torch::randperm_out(*out, n);
 out__[0] = new torch::Tensor(outputs__);
@@ -6361,7 +6361,7 @@ int atg_randperm_out(tensor *out__, tensor out, int64_t n) {
 return 1;
 }
 
-int atg_range(tensor *out__, scalar start, scalar end, int options_kind, int options_device) {
+C_API int atg_range(tensor *out__, scalar start, scalar end, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::range(*start, *end, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -6370,7 +6370,7 @@ int atg_range(tensor *out__, scalar start, scalar end, int options_kind, int opt
 return 1;
 }
 
-int atg_range1(tensor *out__, scalar start, scalar end, int options_kind, int options_device) {
+C_API int atg_range1(tensor *out__, scalar start, scalar end, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::range(*start, *end, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -6379,7 +6379,7 @@ int atg_range1(tensor *out__, scalar start, scalar end, int options_kind, int op
 return 1;
 }
 
-int atg_range_out(tensor *out__, tensor out, scalar start, scalar end) {
+C_API int atg_range_out(tensor *out__, tensor out, scalar start, scalar end) {
 PROTECT(
 auto outputs__ = torch::range_out(*out, *start, *end);
 out__[0] = new torch::Tensor(outputs__);
@@ -6388,7 +6388,7 @@ int atg_range_out(tensor *out__, tensor out, scalar start, scalar end) {
 return 1;
 }
 
-int atg_real(tensor *out__, tensor self) {
+C_API int atg_real(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::real(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -6397,7 +6397,7 @@ int atg_real(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_real_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_real_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::real_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -6406,7 +6406,7 @@ int atg_real_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }
 
-int atg_reciprocal(tensor *out__, tensor self) {
+C_API int atg_reciprocal(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::reciprocal(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -6415,7 +6415,7 @@ int atg_reciprocal(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_reciprocal_(tensor *out__, tensor self) {
+C_API int atg_reciprocal_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::reciprocal_(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -6424,7 +6424,7 @@ int atg_reciprocal_(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_reciprocal_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_reciprocal_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::reciprocal_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -6433,7 +6433,7 @@ int atg_reciprocal_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }
 
-int atg_reflection_pad1d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_reflection_pad1d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::reflection_pad1d(*self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6442,7 +6442,7 @@ int atg_reflection_pad1d(tensor *out__, tensor self, int64_t *padding_data, int
 return 1;
 }
 
-int atg_reflection_pad1d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_reflection_pad1d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::reflection_pad1d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6451,7 +6451,7 @@ int atg_reflection_pad1d_backward(tensor *out__, tensor grad_output, tensor self
 return 1;
 }
 
-int atg_reflection_pad1d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_reflection_pad1d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::reflection_pad1d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6460,7 +6460,7 @@ int atg_reflection_pad1d_backward_out(tensor *out__, tensor grad_input, tensor g
 return 1;
 }
 
-int atg_reflection_pad1d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_reflection_pad1d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::reflection_pad1d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6469,7 +6469,7 @@ int atg_reflection_pad1d_out(tensor *out__, tensor out, tensor self, int64_t *pa
 return 1;
 }
 
-int atg_reflection_pad2d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_reflection_pad2d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::reflection_pad2d(*self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6478,7 +6478,7 @@ int atg_reflection_pad2d(tensor *out__, tensor self, int64_t *padding_data, int
 return 1;
 }
 
-int atg_reflection_pad2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_reflection_pad2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::reflection_pad2d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6487,7 +6487,7 @@ int atg_reflection_pad2d_backward(tensor *out__, tensor
grad_output, tensor self return 1; } -int atg_reflection_pad2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) { +C_API int atg_reflection_pad2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) { PROTECT( auto outputs__ = torch::reflection_pad2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len)); out__[0] = new torch::Tensor(outputs__); @@ -6496,7 +6496,7 @@ int atg_reflection_pad2d_backward_out(tensor *out__, tensor grad_input, tensor g return 1; } -int atg_reflection_pad2d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) { +C_API int atg_reflection_pad2d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) { PROTECT( auto outputs__ = torch::reflection_pad2d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len)); out__[0] = new torch::Tensor(outputs__); @@ -6505,7 +6505,7 @@ int atg_reflection_pad2d_out(tensor *out__, tensor out, tensor self, int64_t *pa return 1; } -int atg_relu(tensor *out__, tensor self) { +C_API int atg_relu(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::relu(*self); out__[0] = new torch::Tensor(outputs__); @@ -6514,7 +6514,7 @@ int atg_relu(tensor *out__, tensor self) { return 1; } -int atg_relu_(tensor *out__, tensor self) { +C_API int atg_relu_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::relu_(*self); out__[0] = new torch::Tensor(outputs__); @@ -6523,7 +6523,7 @@ int atg_relu_(tensor *out__, tensor self) { return 1; } -int atg_remainder(tensor *out__, tensor self, scalar other) { +C_API int atg_remainder(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::remainder(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -6532,7 +6532,7 @@ int atg_remainder(tensor *out__, tensor self, scalar other) { return 1; } -int atg_remainder1(tensor *out__, tensor self, tensor other) { +C_API int atg_remainder1(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::remainder(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -6541,7 +6541,7 @@ int atg_remainder1(tensor *out__, tensor self, tensor other) { return 1; } -int atg_remainder_(tensor *out__, tensor self, scalar other) { +C_API int atg_remainder_(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->remainder_(*other); out__[0] = new torch::Tensor(outputs__); @@ -6550,7 +6550,7 @@ int atg_remainder_(tensor *out__, tensor self, scalar other) { return 1; } -int atg_remainder_1(tensor *out__, tensor self, tensor other) { +C_API int atg_remainder_1(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->remainder_(*other); out__[0] = new torch::Tensor(outputs__); @@ -6559,7 +6559,7 @@ int atg_remainder_1(tensor *out__, tensor self, tensor other) { return 1; } -int atg_remainder_out(tensor *out__, tensor out, tensor self, scalar other) { +C_API int atg_remainder_out(tensor *out__, tensor out, tensor self, scalar other) { PROTECT( auto outputs__ = torch::remainder_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); @@ -6568,7 +6568,7 @@ int atg_remainder_out(tensor *out__, tensor out, tensor self, scalar other) { return 1; } -int atg_remainder_out1(tensor *out__, tensor out, tensor self, tensor other) { +C_API int atg_remainder_out1(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = 
 auto outputs__ = torch::remainder_out(*out, *self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -6577,7 +6577,7 @@ int atg_remainder_out1(tensor *out__, tensor out, tensor self, tensor other) {
 return 1;
 }
 
-int atg_renorm(tensor *out__, tensor self, scalar p, int64_t dim, scalar maxnorm) {
+C_API int atg_renorm(tensor *out__, tensor self, scalar p, int64_t dim, scalar maxnorm) {
 PROTECT(
 auto outputs__ = torch::renorm(*self, *p, dim, *maxnorm);
 out__[0] = new torch::Tensor(outputs__);
@@ -6586,7 +6586,7 @@ int atg_renorm(tensor *out__, tensor self, scalar p, int64_t dim, scalar maxnorm
 return 1;
 }
 
-int atg_renorm_(tensor *out__, tensor self, scalar p, int64_t dim, scalar maxnorm) {
+C_API int atg_renorm_(tensor *out__, tensor self, scalar p, int64_t dim, scalar maxnorm) {
 PROTECT(
 auto outputs__ = self->renorm_(*p, dim, *maxnorm);
 out__[0] = new torch::Tensor(outputs__);
@@ -6595,7 +6595,7 @@ int atg_renorm_(tensor *out__, tensor self, scalar p, int64_t dim, scalar maxnor
 return 1;
 }
 
-int atg_renorm_out(tensor *out__, tensor out, tensor self, scalar p, int64_t dim, scalar maxnorm) {
+C_API int atg_renorm_out(tensor *out__, tensor out, tensor self, scalar p, int64_t dim, scalar maxnorm) {
 PROTECT(
 auto outputs__ = torch::renorm_out(*out, *self, *p, dim, *maxnorm);
 out__[0] = new torch::Tensor(outputs__);
@@ -6604,7 +6604,7 @@ int atg_renorm_out(tensor *out__, tensor out, tensor self, scalar p, int64_t dim
 return 1;
 }
 
-int atg_repeat(tensor *out__, tensor self, int64_t *repeats_data, int repeats_len) {
+C_API int atg_repeat(tensor *out__, tensor self, int64_t *repeats_data, int repeats_len) {
 PROTECT(
 auto outputs__ = self->repeat(torch::IntArrayRef(repeats_data, repeats_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6613,7 +6613,7 @@ int atg_repeat(tensor *out__, tensor self, int64_t *repeats_data, int repeats_le
 return 1;
 }
 
-int atg_repeat_interleave(tensor *out__, tensor repeats) {
+C_API int atg_repeat_interleave(tensor *out__, tensor repeats) {
 PROTECT(
 auto outputs__ = torch::repeat_interleave(*repeats);
 out__[0] = new torch::Tensor(outputs__);
@@ -6622,7 +6622,7 @@ int atg_repeat_interleave(tensor *out__, tensor repeats) {
 return 1;
 }
 
-int atg_repeat_interleave1(tensor *out__, tensor self, tensor repeats, int64_t dim) {
+C_API int atg_repeat_interleave1(tensor *out__, tensor self, tensor repeats, int64_t dim) {
 PROTECT(
 auto outputs__ = torch::repeat_interleave(*self, *repeats, dim);
 out__[0] = new torch::Tensor(outputs__);
@@ -6631,7 +6631,7 @@ int atg_repeat_interleave1(tensor *out__, tensor self, tensor repeats, int64_t d
 return 1;
 }
 
-int atg_repeat_interleave2(tensor *out__, tensor self, int64_t repeats, int64_t dim) {
+C_API int atg_repeat_interleave2(tensor *out__, tensor self, int64_t repeats, int64_t dim) {
 PROTECT(
 auto outputs__ = torch::repeat_interleave(*self, repeats, dim);
 out__[0] = new torch::Tensor(outputs__);
@@ -6640,7 +6640,7 @@ int atg_repeat_interleave2(tensor *out__, tensor self, int64_t repeats, int64_t
 return 1;
 }
 
-int atg_replication_pad1d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_replication_pad1d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::replication_pad1d(*self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6649,7 +6649,7 @@ int atg_replication_pad1d(tensor *out__, tensor self, int64_t *padding_data, int
 return 1;
 }
 
-int atg_replication_pad1d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_replication_pad1d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::replication_pad1d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6658,7 +6658,7 @@ int atg_replication_pad1d_backward(tensor *out__, tensor grad_output, tensor sel
 return 1;
 }
 
-int atg_replication_pad1d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_replication_pad1d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::replication_pad1d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6667,7 +6667,7 @@ int atg_replication_pad1d_backward_out(tensor *out__, tensor grad_input, tensor
 return 1;
 }
 
-int atg_replication_pad1d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_replication_pad1d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::replication_pad1d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6676,7 +6676,7 @@ int atg_replication_pad1d_out(tensor *out__, tensor out, tensor self, int64_t *p
 return 1;
 }
 
-int atg_replication_pad2d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_replication_pad2d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::replication_pad2d(*self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6685,7 +6685,7 @@ int atg_replication_pad2d(tensor *out__, tensor self, int64_t *padding_data, int
 return 1;
 }
 
-int atg_replication_pad2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_replication_pad2d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::replication_pad2d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6694,7 +6694,7 @@ int atg_replication_pad2d_backward(tensor *out__, tensor grad_output, tensor sel
 return 1;
 }
 
-int atg_replication_pad2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_replication_pad2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::replication_pad2d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6703,7 +6703,7 @@ int atg_replication_pad2d_backward_out(tensor *out__, tensor grad_input, tensor
 return 1;
 }
 
-int atg_replication_pad2d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_replication_pad2d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::replication_pad2d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6712,7 +6712,7 @@ int atg_replication_pad2d_out(tensor *out__, tensor out, tensor self, int64_t *p
 return 1;
 }
 
-int atg_replication_pad3d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_replication_pad3d(tensor *out__, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::replication_pad3d(*self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6721,7 +6721,7 @@ int atg_replication_pad3d(tensor *out__, tensor self, int64_t *padding_data, int
 return 1;
 }
 
-int atg_replication_pad3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_replication_pad3d_backward(tensor *out__, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::replication_pad3d_backward(*grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6730,7 +6730,7 @@ int atg_replication_pad3d_backward(tensor *out__, tensor grad_output, tensor sel
 return 1;
 }
 
-int atg_replication_pad3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_replication_pad3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::replication_pad3d_backward_out(*grad_input, *grad_output, *self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6739,7 +6739,7 @@ int atg_replication_pad3d_backward_out(tensor *out__, tensor grad_input, tensor
 return 1;
 }
 
-int atg_replication_pad3d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
+C_API int atg_replication_pad3d_out(tensor *out__, tensor out, tensor self, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::replication_pad3d_out(*out, *self, torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6748,7 +6748,7 @@ int atg_replication_pad3d_out(tensor *out__, tensor out, tensor self, int64_t *p
 return 1;
 }
 
-int atg_requires_grad_(tensor *out__, tensor self, int _requires_grad) {
+C_API int atg_requires_grad_(tensor *out__, tensor self, int _requires_grad) {
 PROTECT(
 auto outputs__ = self->requires_grad_((bool)_requires_grad);
 out__[0] = new torch::Tensor(outputs__);
@@ -6757,7 +6757,7 @@ int atg_requires_grad_(tensor *out__, tensor self, int _requires_grad) {
 return 1;
 }
 
-int atg_reshape(tensor *out__, tensor self, int64_t *shape_data, int shape_len) {
+C_API int atg_reshape(tensor *out__, tensor self, int64_t *shape_data, int shape_len) {
 PROTECT(
 auto outputs__ = torch::reshape(*self, torch::IntArrayRef(shape_data, shape_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6766,7 +6766,7 @@ int atg_reshape(tensor *out__, tensor self, int64_t *shape_data, int shape_len)
 return 1;
 }
 
-int atg_reshape_as(tensor *out__, tensor self, tensor other) {
+C_API int atg_reshape_as(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = self->reshape_as(*other);
 out__[0] = new torch::Tensor(outputs__);
@@ -6775,7 +6775,7 @@ int atg_reshape_as(tensor *out__, tensor self, tensor other) {
 return 1;
 }
 
-int atg_resize_(tensor *out__, tensor self, int64_t *size_data, int size_len) {
+C_API int atg_resize_(tensor *out__, tensor self, int64_t *size_data, int size_len) {
 PROTECT(
 auto outputs__ = self->resize_(torch::IntArrayRef(size_data, size_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6784,7 +6784,7 @@ int atg_resize_(tensor *out__, tensor self, int64_t *size_data, int size_len) {
 return 1;
 }
 
-int atg_resize_as_(tensor *out__, tensor self, tensor the_template) {
+C_API int atg_resize_as_(tensor *out__, tensor self, tensor the_template) {
 PROTECT(
 auto outputs__ = torch::resize_as_(*self, *the_template);
 out__[0] = new torch::Tensor(outputs__);
@@ -6793,7 +6793,7 @@ int atg_resize_as_(tensor *out__, tensor self, tensor the_template) {
 return 1;
 }
 
-int atg_rfft(tensor *out__, tensor self, int64_t signal_ndim, int normalized, int onesided) {
+C_API int atg_rfft(tensor *out__, tensor self, int64_t signal_ndim, int normalized, int onesided) {
 PROTECT(
 auto outputs__ = torch::rfft(*self, signal_ndim, (bool)normalized, (bool)onesided);
 out__[0] = new torch::Tensor(outputs__);
@@ -6802,7 +6802,7 @@ int atg_rfft(tensor *out__, tensor self, int64_t signal_ndim, int normalized, in
 return 1;
 }
 
-int atg_rnn_relu(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
+C_API int atg_rnn_relu(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
 PROTECT(
 auto outputs__ = torch::rnn_relu(*input, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -6812,7 +6812,7 @@ int atg_rnn_relu(tensor *out__, tensor input, tensor hx, tensor *params_data, in
 return 1;
 }
 
-int atg_rnn_relu1(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
+C_API int atg_rnn_relu1(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
 PROTECT(
 auto outputs__ = torch::rnn_relu(*data, *batch_sizes, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -6822,7 +6822,7 @@ int atg_rnn_relu1(tensor *out__, tensor data, tensor batch_sizes, tensor hx, ten
 return 1;
 }
 
-int atg_rnn_relu_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
+C_API int atg_rnn_relu_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
 PROTECT(
 auto outputs__ = torch::rnn_relu_cell(*input, *hx, *w_ih, *w_hh, (b_ih ? *b_ih : torch::Tensor()), (b_hh ? *b_hh : torch::Tensor()));
 out__[0] = new torch::Tensor(outputs__);
@@ -6831,7 +6831,7 @@ int atg_rnn_relu_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tenso
 return 1;
 }
 
-int atg_rnn_tanh(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
+C_API int atg_rnn_tanh(tensor *out__, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first) {
 PROTECT(
 auto outputs__ = torch::rnn_tanh(*input, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional, (bool)batch_first);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -6841,7 +6841,7 @@ int atg_rnn_tanh(tensor *out__, tensor input, tensor hx, tensor *params_data, in
 return 1;
 }
 
-int atg_rnn_tanh1(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
+C_API int atg_rnn_tanh1(tensor *out__, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional) {
 PROTECT(
 auto outputs__ = torch::rnn_tanh(*data, *batch_sizes, *hx, of_carray_tensor(params_data, params_len), (bool)has_biases, num_layers, dropout, (bool)train, (bool)bidirectional);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -6851,7 +6851,7 @@ int atg_rnn_tanh1(tensor *out__, tensor data, tensor batch_sizes, tensor hx, ten
 return 1;
 }
 
-int atg_rnn_tanh_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
+C_API int atg_rnn_tanh_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh) {
 PROTECT(
 auto outputs__ = torch::rnn_tanh_cell(*input, *hx, *w_ih, *w_hh, (b_ih ? *b_ih : torch::Tensor()), (b_hh ? *b_hh : torch::Tensor()));
 out__[0] = new torch::Tensor(outputs__);
@@ -6860,7 +6860,7 @@ int atg_rnn_tanh_cell(tensor *out__, tensor input, tensor hx, tensor w_ih, tenso
 return 1;
 }
 
-int atg_roll(tensor *out__, tensor self, int64_t *shifts_data, int shifts_len, int64_t *dims_data, int dims_len) {
+C_API int atg_roll(tensor *out__, tensor self, int64_t *shifts_data, int shifts_len, int64_t *dims_data, int dims_len) {
 PROTECT(
 auto outputs__ = torch::roll(*self, torch::IntArrayRef(shifts_data, shifts_len), torch::IntArrayRef(dims_data, dims_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6869,7 +6869,7 @@ int atg_roll(tensor *out__, tensor self, int64_t *shifts_data, int shifts_len, i
 return 1;
 }
 
-int atg_rot90(tensor *out__, tensor self, int64_t k, int64_t *dims_data, int dims_len) {
+C_API int atg_rot90(tensor *out__, tensor self, int64_t k, int64_t *dims_data, int dims_len) {
 PROTECT(
 auto outputs__ = torch::rot90(*self, k, torch::IntArrayRef(dims_data, dims_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -6878,7 +6878,7 @@ int atg_rot90(tensor *out__, tensor self, int64_t k, int64_t *dims_data, int dim
 return 1;
 }
 
-int atg_round(tensor *out__, tensor self) {
+C_API int atg_round(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::round(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -6887,7 +6887,7 @@ int atg_round(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_round_(tensor *out__, tensor self) {
+C_API int atg_round_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::round_(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -6896,7 +6896,7 @@ int atg_round_(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_round_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_round_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::round_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -6905,7 +6905,7 @@ int atg_round_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }
 
-int atg_rrelu(tensor *out__, tensor self, int training) {
+C_API int atg_rrelu(tensor *out__, tensor self, int training) {
 PROTECT(
 auto outputs__ = torch::rrelu(*self, (bool)training);
 out__[0] = new torch::Tensor(outputs__);
@@ -6914,7 +6914,7 @@ int atg_rrelu(tensor *out__, tensor self, int training) {
 return 1;
 }
 
-int atg_rrelu_(tensor *out__, tensor self, int training) {
+C_API int atg_rrelu_(tensor *out__, tensor self, int training) {
 PROTECT(
 auto outputs__ = torch::rrelu_(*self, (bool)training);
 out__[0] = new torch::Tensor(outputs__);
@@ -6923,7 +6923,7 @@ int atg_rrelu_(tensor *out__, tensor self, int training) {
 return 1;
 }
 
-int atg_rrelu_with_noise(tensor *out__, tensor self, tensor noise, int training) {
+C_API int atg_rrelu_with_noise(tensor *out__, tensor self, tensor noise, int training) {
 PROTECT(
 auto outputs__ = torch::rrelu_with_noise(*self, *noise, (bool)training);
 out__[0] = new torch::Tensor(outputs__);
@@ -6932,7 +6932,7 @@ int atg_rrelu_with_noise(tensor *out__, tensor self, tensor noise, int training)
 return 1;
 }
 
-int atg_rrelu_with_noise_(tensor *out__, tensor self, tensor noise, int training) {
+C_API int atg_rrelu_with_noise_(tensor *out__, tensor self, tensor noise, int training) {
 PROTECT(
 auto outputs__ = torch::rrelu_with_noise_(*self, *noise, (bool)training);
 out__[0] = new torch::Tensor(outputs__);
@@ -6941,7 +6941,7 @@ int atg_rrelu_with_noise_(tensor *out__, tensor self, tensor noise, int training
 return 1;
 }
 
-int atg_rrelu_with_noise_backward(tensor *out__, tensor grad_output, tensor self, tensor noise, scalar lower, scalar upper, int training) {
+C_API int atg_rrelu_with_noise_backward(tensor *out__, tensor grad_output, tensor self, tensor noise, scalar lower, scalar upper, int training) {
 PROTECT(
 auto outputs__ = torch::rrelu_with_noise_backward(*grad_output, *self, *noise, *lower, *upper, (bool)training);
 out__[0] = new torch::Tensor(outputs__);
@@ -6950,7 +6950,7 @@ int atg_rrelu_with_noise_backward(tensor *out__, tensor grad_output, tensor self
 return 1;
 }
 
-int atg_rrelu_with_noise_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor noise, scalar lower, scalar upper, int training) {
+C_API int atg_rrelu_with_noise_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor noise, scalar lower, scalar upper, int training) {
 PROTECT(
 auto outputs__ = torch::rrelu_with_noise_backward_out(*grad_input, *grad_output, *self, *noise, *lower, *upper, (bool)training);
 out__[0] = new torch::Tensor(outputs__);
@@ -6959,7 +6959,7 @@ int atg_rrelu_with_noise_backward_out(tensor *out__, tensor g
 return 1;
 }
 
-int atg_rrelu_with_noise_out(tensor *out__, tensor out, tensor self, tensor noise, int training) {
+C_API int atg_rrelu_with_noise_out(tensor *out__, tensor out, tensor self, tensor noise, int training) {
 PROTECT(
 auto outputs__ = torch::rrelu_with_noise_out(*out, *self, *noise, (bool)training);
 out__[0] = new torch::Tensor(outputs__);
@@ -6968,7 +6968,7 @@ int atg_rrelu_with_noise_out(tensor *out__, tensor out, tensor self, tensor nois
 return 1;
 }
 
-int atg_rsqrt(tensor *out__, tensor self) {
+C_API int atg_rsqrt(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::rsqrt(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -6977,7 +6977,7 @@ int atg_rsqrt(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_rsqrt_(tensor *out__, tensor self) {
+C_API int atg_rsqrt_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::rsqrt_(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -6986,7 +6986,7 @@ int atg_rsqrt_(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_rsqrt_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_rsqrt_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::rsqrt_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -6995,7 +6995,7 @@ int atg_rsqrt_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }
 
-int atg_rsub(tensor *out__, tensor self, tensor other) {
+C_API int atg_rsub(tensor *out__, tensor self, tensor other) {
 PROTECT(
 auto outputs__ = torch::rsub(*self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -7004,7 +7004,7 @@ int atg_rsub(tensor *out__, tensor self, tensor other) {
 return 1;
 }
 
-int atg_rsub1(tensor *out__, tensor self, scalar other) {
+C_API int atg_rsub1(tensor *out__, tensor self, scalar other) {
 PROTECT(
 auto outputs__ = torch::rsub(*self, *other);
 out__[0] = new torch::Tensor(outputs__);
@@ -7013,7 +7013,7 @@ int atg_rsub1(tensor *out__, tensor self, scalar other) {
 return 1;
 }
 
-int atg_scalar_tensor(tensor *out__, scalar s, int options_kind, int options_device) {
+C_API int atg_scalar_tensor(tensor *out__, scalar s, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::scalar_tensor(*s, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -7022,7 +7022,7 @@ int atg_scalar_tensor(tensor *out__, scalar s, int options_kind, int options_dev
 return 1;
 }
 
-int atg_scatter(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) {
+C_API int atg_scatter(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) {
 PROTECT(
 auto outputs__ = torch::scatter(*self, dim, *index, *src);
 out__[0] = new torch::Tensor(outputs__);
@@ -7031,7 +7031,7 @@ int atg_scatter(tensor *out__, tensor self, int64_t dim, tensor index, tensor sr
 return 1;
 }
 
-int atg_scatter1(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) {
+C_API int atg_scatter1(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) {
 PROTECT(
 auto outputs__ = torch::scatter(*self, dim, *index, *value);
 out__[0] = new torch::Tensor(outputs__);
@@ -7040,7 +7040,7 @@ int atg_scatter1(tensor *out__, tensor self, int64_t dim, tensor index, scalar v
 return 1;
 }
 
-int atg_scatter_(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) {
+C_API int atg_scatter_(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) {
 PROTECT(
 auto outputs__ = self->scatter_(dim, *index, *src);
 out__[0] = new torch::Tensor(outputs__);
@@ -7049,7 +7049,7 @@ int atg_scatter_(tensor *out__, tensor self, int64_t dim, tensor index, tensor s
 return 1;
 }
 
-int atg_scatter_1(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) {
+C_API int atg_scatter_1(tensor *out__, tensor self, int64_t dim, tensor index, scalar value) {
 PROTECT(
 auto outputs__ = self->scatter_(dim, *index, *value);
 out__[0] = new torch::Tensor(outputs__);
@@ -7058,7 +7058,7 @@ int atg_scatter_1(tensor *out__, tensor self, int64_t dim, tensor index, scalar
 return 1;
 }
 
-int atg_scatter_add(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) {
+C_API int atg_scatter_add(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) {
 PROTECT(
 auto outputs__ = torch::scatter_add(*self, dim, *index, *src);
 out__[0] = new torch::Tensor(outputs__);
@@ -7067,7 +7067,7 @@ int atg_scatter_add(tensor *out__, tensor self, int64_t dim, tensor index, tenso
 return 1;
 }
 
-int atg_scatter_add_(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) {
+C_API int atg_scatter_add_(tensor *out__, tensor self, int64_t dim, tensor index, tensor src) {
 PROTECT(
 auto outputs__ = self->scatter_add_(dim, *index, *src);
 out__[0] = new torch::Tensor(outputs__);
@@ -7076,7 +7076,7 @@ int atg_scatter_add_(tensor *out__, tensor self, int64_t dim, tensor index, tens
 return 1;
 }
 
-int atg_select(tensor *out__, tensor self, int64_t dim, int64_t index) {
+C_API int atg_select(tensor *out__, tensor self, int64_t dim, int64_t index) {
 PROTECT(
 auto outputs__ = torch::select(*self, dim, index);
 out__[0] = new torch::Tensor(outputs__);
@@ -7085,7 +7085,7 @@ int atg_select(tensor *out__, tensor self, int64_t dim, int64_t index) {
 return 1;
 }
 
-int atg_selu(tensor *out__, tensor self) {
+C_API int atg_selu(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::selu(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7094,7 +7094,7 @@ int atg_selu(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_selu_(tensor *out__, tensor self) {
+C_API int atg_selu_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::selu_(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7103,7 +7103,7 @@ int atg_selu_(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_set_(tensor *out__, tensor self) {
+C_API int atg_set_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = self->set_();
 out__[0] = new torch::Tensor(outputs__);
@@ -7112,7 +7112,7 @@ int atg_set_(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_set_1(tensor *out__, tensor self, tensor source) {
+C_API int atg_set_1(tensor *out__, tensor self, tensor source) {
 PROTECT(
 auto outputs__ = self->set_(*source);
 out__[0] = new torch::Tensor(outputs__);
@@ -7121,7 +7121,7 @@ int atg_set_1(tensor *out__, tensor self, tensor source) {
 return 1;
 }
 
-int atg_set_requires_grad(tensor *out__, tensor self, int r) {
+C_API int atg_set_requires_grad(tensor *out__, tensor self, int r) {
 PROTECT(
 auto outputs__ = self->set_requires_grad((bool)r);
 out__[0] = new torch::Tensor(outputs__);
@@ -7130,7 +7130,7 @@ int atg_set_requires_grad(tensor *out__, tensor self, int r) {
 return 1;
 }
 
-int atg_sigmoid(tensor *out__, tensor self) {
+C_API int atg_sigmoid(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::sigmoid(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7139,7 +7139,7 @@ int atg_sigmoid(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_sigmoid_(tensor *out__, tensor self) {
+C_API int atg_sigmoid_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::sigmoid_(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7148,7 +7148,7 @@ int atg_sigmoid_(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_sigmoid_backward(tensor *out__, tensor grad_output, tensor output) {
+C_API int atg_sigmoid_backward(tensor *out__, tensor grad_output, tensor output) {
 PROTECT(
 auto outputs__ = torch::sigmoid_backward(*grad_output, *output);
 out__[0] = new torch::Tensor(outputs__);
@@ -7157,7 +7157,7 @@ int atg_sigmoid_backward(tensor *out__, tensor grad_output, tensor output) {
 return 1;
 }
 
-int atg_sigmoid_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor output) {
+C_API int atg_sigmoid_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor output) {
 PROTECT(
 auto outputs__ = torch::sigmoid_backward_out(*grad_input, *grad_output, *output);
 out__[0] = new torch::Tensor(outputs__);
@@ -7166,7 +7166,7 @@ int atg_sigmoid_backward_out(tensor *out__, tensor grad_input, tensor grad_outpu
 return 1;
 }
 
-int atg_sigmoid_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_sigmoid_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::sigmoid_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7175,7 +7175,7 @@ int atg_sigmoid_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }
 
-int atg_sign(tensor *out__, tensor self) {
+C_API int atg_sign(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::sign(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7184,7 +7184,7 @@ int atg_sign(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_sign_(tensor *out__, tensor self) {
+C_API int atg_sign_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = self->sign_();
 out__[0] = new torch::Tensor(outputs__);
@@ -7193,7 +7193,7 @@ int atg_sign_(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_sign_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_sign_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::sign_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7202,7 +7202,7 @@ int atg_sign_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }
 
-int atg_sin(tensor *out__, tensor self) {
+C_API int atg_sin(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::sin(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7211,7 +7211,7 @@ int atg_sin(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_sin_(tensor *out__, tensor self) {
+C_API int atg_sin_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::sin_(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7220,7 +7220,7 @@ int atg_sin_(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_sin_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_sin_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::sin_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7229,7 +7229,7 @@ int atg_sin_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }
 
-int atg_sinh(tensor *out__, tensor self) {
+C_API int atg_sinh(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::sinh(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7238,7 +7238,7 @@ int atg_sinh(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_sinh_(tensor *out__, tensor self) {
+C_API int atg_sinh_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::sinh_(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7247,7 +7247,7 @@ int atg_sinh_(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_sinh_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_sinh_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::sinh_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7256,7 +7256,7 @@ int atg_sinh_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }
 
-int atg_slice(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t end, int64_t step) {
+C_API int atg_slice(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t end, int64_t step) {
 PROTECT(
 auto outputs__ = torch::slice(*self, dim, start, end, step);
 out__[0] = new torch::Tensor(outputs__);
@@ -7265,7 +7265,7 @@ int atg_slice(tensor *out__, tensor self, int64_t dim, int64_t start, int64_t en
 return 1;
 }
 
-int atg_slogdet(tensor *out__, tensor self) {
+C_API int atg_slogdet(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::slogdet(*self);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -7275,7 +7275,7 @@ int atg_slogdet(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_slow_conv3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) {
+C_API int atg_slow_conv3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::slow_conv3d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -7284,7 +7284,7 @@ int atg_slow_conv3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_s
 return 1;
 }
 
-int atg_slow_conv3d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) {
+C_API int atg_slow_conv3d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len) {
 PROTECT(
 auto outputs__ = torch::slow_conv3d_out(*out, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -7293,7 +7293,7 @@ int atg_slow_conv3d_out(tensor *out__, tensor out, tensor self, tensor weight, i
 return 1;
 }
 
-int atg_slow_conv_dilated2d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) {
+C_API int atg_slow_conv_dilated2d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) {
 PROTECT(
 auto outputs__ = torch::slow_conv_dilated2d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -7302,7 +7302,7 @@ int atg_slow_conv_dilated2d(tensor *out__, tensor self, tensor weight, int64_t *
 return 1;
 }
 
-int atg_slow_conv_dilated3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) {
+C_API int atg_slow_conv_dilated3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len) {
 PROTECT(
 auto outputs__ = torch::slow_conv_dilated3d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(dilation_data, dilation_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -7311,7 +7311,7 @@ int atg_slow_conv_dilated3d(tensor *out__, tensor self, tensor weight, int64_t *
 return 1;
 }
 
-int atg_slow_conv_transpose2d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) {
+C_API int atg_slow_conv_transpose2d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) {
 PROTECT(
 auto outputs__ = torch::slow_conv_transpose2d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(dilation_data, dilation_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -7320,7 +7320,7 @@ int atg_slow_conv_transpose2d(tensor *out__, tensor self, tensor weight, int64_t
 return 1;
 }
 
-int atg_slow_conv_transpose2d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) {
+C_API int atg_slow_conv_transpose2d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) {
 PROTECT(
 auto outputs__ = torch::slow_conv_transpose2d_out(*out, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(dilation_data, dilation_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -7329,7 +7329,7 @@ int atg_slow_conv_transpose2d_out(tensor *out__, tensor out, tensor self, tensor
 return 1;
 }
 
-int atg_slow_conv_transpose3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) {
+C_API int atg_slow_conv_transpose3d(tensor *out__, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) {
 PROTECT(
 auto outputs__ = torch::slow_conv_transpose3d(*self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(dilation_data, dilation_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -7338,7 +7338,7 @@ int atg_slow_conv_transpose3d(tensor *out__, tensor self, tensor weight, int64_t
 return 1;
 }
 
-int atg_slow_conv_transpose3d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) {
+C_API int atg_slow_conv_transpose3d_out(tensor *out__, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len) {
 PROTECT(
 auto outputs__ = torch::slow_conv_transpose3d_out(*out, *self, *weight, torch::IntArrayRef(kernel_size_data, kernel_size_len), (bias ? *bias : torch::Tensor()), torch::IntArrayRef(stride_data, stride_len), torch::IntArrayRef(padding_data, padding_len), torch::IntArrayRef(output_padding_data, output_padding_len), torch::IntArrayRef(dilation_data, dilation_len));
 out__[0] = new torch::Tensor(outputs__);
@@ -7347,7 +7347,7 @@ int atg_slow_conv_transpose3d_out(tensor *out__, tensor out, tensor self, tensor
 return 1;
 }
 
-int atg_smm(tensor *out__, tensor self, tensor mat2) {
+C_API int atg_smm(tensor *out__, tensor self, tensor mat2) {
 PROTECT(
 auto outputs__ = torch::smm(*self, *mat2);
 out__[0] = new torch::Tensor(outputs__);
@@ -7356,7 +7356,7 @@ int atg_smm(tensor *out__, tensor self, tensor mat2) {
 return 1;
 }
 
-int atg_smooth_l1_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
+C_API int atg_smooth_l1_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::smooth_l1_loss(*self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -7365,7 +7365,7 @@ int atg_smooth_l1_loss(tensor *out__, tensor self, tensor target, int64_t reduct
 return 1;
 }
 
-int atg_smooth_l1_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+C_API int atg_smooth_l1_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::smooth_l1_loss_backward(*grad_output, *self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -7374,7 +7374,7 @@ int atg_smooth_l1_loss_backward(tensor *out__, tensor grad_output, tensor self,
 return 1;
 }
 
-int atg_smooth_l1_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+C_API int atg_smooth_l1_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::smooth_l1_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -7383,7 +7383,7 @@ int atg_smooth_l1_loss_backward_out(tensor *out__, tensor gra
 return 1;
 }
 
-int atg_smooth_l1_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
+C_API int atg_smooth_l1_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::smooth_l1_loss_out(*out, *self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -7392,7 +7392,7 @@ int atg_smooth_l1_loss_out(tensor *out__, tensor out, tensor self, tensor target
 return 1;
 }
 
-int atg_soft_margin_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
+C_API int atg_soft_margin_loss(tensor *out__, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::soft_margin_loss(*self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -7401,7 +7401,7 @@ int atg_soft_margin_loss(tensor *out__, tensor self, tensor target, int64_t redu
 return 1;
 }
 
-int atg_soft_margin_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+C_API int atg_soft_margin_loss_backward(tensor *out__, tensor grad_output, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::soft_margin_loss_backward(*grad_output, *self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -7410,7 +7410,7 @@ int atg_soft_margin_loss_backward(tensor *out__, tensor grad_output, tensor self
 return 1;
 }
 
-int atg_soft_margin_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) {
+C_API int atg_soft_margin_loss_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::soft_margin_loss_backward_out(*grad_input, *grad_output, *self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -7419,7 +7419,7 @@ int atg_soft_margin_loss_backward_out(tensor *out__, tensor g
 return 1;
 }
 
-int atg_soft_margin_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
+C_API int atg_soft_margin_loss_out(tensor *out__, tensor out, tensor self, tensor target, int64_t reduction) {
 PROTECT(
 auto outputs__ = torch::soft_margin_loss_out(*out, *self, *target, reduction);
 out__[0] = new torch::Tensor(outputs__);
@@ -7428,7 +7428,7 @@ int atg_soft_margin_loss_out(tensor *out__, tensor out, tensor self, tensor targ
 return 1;
 }
 
-int atg_softmax(tensor *out__, tensor self, int64_t dim, int dtype) {
+C_API int atg_softmax(tensor *out__, tensor self, int64_t dim, int dtype) {
 PROTECT(
 auto outputs__ = torch::softmax(*self, dim, torch::ScalarType(dtype));
 out__[0] = new torch::Tensor(outputs__);
@@ -7437,7 +7437,7 @@ int atg_softmax(tensor *out__, tensor self, int64_t dim, int dtype) {
 return 1;
 }
 
-int atg_softplus(tensor *out__, tensor self) {
+C_API int atg_softplus(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::softplus(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7446,7 +7446,7 @@ int atg_softplus(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_softplus_backward(tensor *out__, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output) {
+C_API int atg_softplus_backward(tensor *out__, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output) {
 PROTECT(
 auto outputs__ = torch::softplus_backward(*grad_output, *self, *beta, *threshold, *output);
 out__[0] = new torch::Tensor(outputs__);
@@ -7455,7 +7455,7 @@ int atg_softplus_backward(tensor *out__, tensor grad_output, tensor self, scalar
 return 1;
 }
 
-int atg_softplus_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output) {
+C_API int atg_softplus_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output) {
 PROTECT(
 auto outputs__ = torch::softplus_backward_out(*grad_input, *grad_output, *self, *beta, *threshold, *output);
 out__[0] = new torch::Tensor(outputs__);
@@ -7464,7 +7464,7 @@ int atg_softplus_backward_out(tensor *out__, tensor grad_input, tensor grad_outp
 return 1;
 }
 
-int atg_softplus_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_softplus_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::softplus_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7473,7 +7473,7 @@ int atg_softplus_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }
 
-int atg_softshrink(tensor *out__, tensor self) {
+C_API int atg_softshrink(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::softshrink(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7482,7 +7482,7 @@ int atg_softshrink(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_softshrink_backward(tensor *out__, tensor grad_output, tensor self, scalar lambd) {
+C_API int atg_softshrink_backward(tensor *out__, tensor grad_output, tensor self, scalar lambd) {
 PROTECT(
 auto outputs__ = torch::softshrink_backward(*grad_output, *self, *lambd);
 out__[0] = new torch::Tensor(outputs__);
@@ -7491,7 +7491,7 @@ int atg_softshrink_backward(tensor *out__, tensor grad_output, tensor self, scal
 return 1;
 }
 
-int atg_softshrink_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar lambd) {
+C_API int atg_softshrink_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor self, scalar lambd) {
 PROTECT(
 auto outputs__ = torch::softshrink_backward_out(*grad_input, *grad_output, *self, *lambd);
 out__[0] = new torch::Tensor(outputs__);
@@ -7500,7 +7500,7 @@ int atg_softshrink_backward_out(tensor *out__, tensor grad_ou
 return 1;
 }
 
-int atg_softshrink_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_softshrink_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::softshrink_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7509,7 +7509,7 @@ int atg_softshrink_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }
 
-int atg_solve(tensor *out__, tensor self, tensor A) {
+C_API int atg_solve(tensor *out__, tensor self, tensor A) {
 PROTECT(
 auto outputs__ = torch::solve(*self, *A);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -7519,7 +7519,7 @@ int atg_solve(tensor *out__, tensor self, tensor A) {
 return 1;
 }
 
-int atg_solve_out(tensor *out__, tensor solution, tensor lu, tensor self, tensor A) {
+C_API int atg_solve_out(tensor *out__, tensor solution, tensor lu, tensor self, tensor A) {
 PROTECT(
 auto outputs__ = torch::solve_out(*solution, *lu, *self, *A);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -7529,7 +7529,7 @@ int atg_solve_out(tensor *out__, tensor solution, tensor lu, tensor self, tensor
 return 1;
 }
 
-int atg_sort(tensor *out__, tensor self, int64_t dim, int descending) {
+C_API int atg_sort(tensor *out__, tensor self, int64_t dim, int descending) {
 PROTECT(
 auto outputs__ = torch::sort(*self, dim, (bool)descending);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -7539,7 +7539,7 @@ int atg_sort(tensor *out__, tensor self, int64_t dim, int descending) {
 return 1;
 }
 
-int atg_sort_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim, int descending) {
+C_API int atg_sort_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t dim, int descending) {
 PROTECT(
 auto outputs__ = torch::sort_out(*values, *indices, *self, dim, (bool)descending);
 out__[0] = new torch::Tensor(std::get<0>(outputs__));
@@ -7549,7 +7549,7 @@ int atg_sort_out(tensor *out__, tensor values, tensor indices, tensor self, int6
 return 1;
 }
 
-int atg_sparse_coo_tensor(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
+C_API int atg_sparse_coo_tensor(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::sparse_coo_tensor(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -7558,7 +7558,7 @@ int atg_sparse_coo_tensor(tensor *out__, int64_t *size_data, int size_len, int o
 return 1;
 }
 
-int atg_sparse_coo_tensor1(tensor *out__, tensor indices, tensor values, int options_kind, int options_device) {
+C_API int atg_sparse_coo_tensor1(tensor *out__, tensor indices, tensor values, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::sparse_coo_tensor(*indices, *values, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -7567,7 +7567,7 @@ int atg_sparse_coo_tensor1(tensor *out__, tensor indices, tensor values, int opt
 return 1;
 }
 
-int atg_sparse_coo_tensor2(tensor *out__, tensor indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device) {
+C_API int atg_sparse_coo_tensor2(tensor *out__, tensor indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device) {
 PROTECT(
 auto outputs__ = torch::sparse_coo_tensor(*indices, *values, torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)));
 out__[0] = new torch::Tensor(outputs__);
@@ -7576,7 +7576,7 @@ int atg_sparse_coo_tensor2(tensor *out__, tensor indices, tensor values, int64_t
 return 1;
 }
 
-int atg_sparse_mask(tensor *out__, tensor self, tensor mask) {
+C_API int atg_sparse_mask(tensor *out__, tensor self, tensor mask) {
 PROTECT(
 auto outputs__ = self->sparse_mask(*mask);
 out__[0] = new torch::Tensor(outputs__);
@@ -7585,7 +7585,7 @@ int atg_sparse_mask(tensor *out__, tensor self, tensor mask) {
 return 1;
 }
 
-int atg_sparse_resize_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim) {
+C_API int atg_sparse_resize_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim) {
 PROTECT(
 auto outputs__ = self->sparse_resize_(torch::IntArrayRef(size_data, size_len), sparse_dim, dense_dim);
 out__[0] = new torch::Tensor(outputs__);
@@ -7594,7 +7594,7 @@ int atg_sparse_resize_(tensor *out__, tensor self, int64_t *size_data, int size_
 return 1;
 }
 
-int atg_sparse_resize_and_clear_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim) {
+C_API int atg_sparse_resize_and_clear_(tensor *out__, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim) {
 PROTECT(
 auto outputs__ = self->sparse_resize_and_clear_(torch::IntArrayRef(size_data, size_len), sparse_dim, dense_dim);
 out__[0] = new torch::Tensor(outputs__);
@@ -7603,7 +7603,7 @@ int atg_sparse_resize_and_clear_(tensor *out__, tensor self, int64_t *size_data,
 return 1;
 }
 
-int atg_split(tensor *out__, tensor self, int64_t split_size, int64_t dim) {
+C_API int atg_split(tensor *out__, tensor self, int64_t split_size, int64_t dim) {
 PROTECT(
 auto outputs__ = torch::split(*self, split_size, dim);
 int sz = outputs__.size();
@@ -7617,7 +7617,7 @@ int atg_split(tensor *out__, tensor self, int64_t split_size, int64_t dim) {
 return 1;
 }
 
-int atg_split_with_sizes(tensor *out__, tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim) {
+C_API int atg_split_with_sizes(tensor *out__, tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim) {
 PROTECT(
 auto outputs__ = torch::split_with_sizes(*self, torch::IntArrayRef(split_sizes_data, split_sizes_len), dim);
 int sz = outputs__.size();
@@ -7631,7 +7631,7 @@ int atg_split_with_sizes(tensor *out__, tensor self, int64_t *split_sizes_data,
 return 1;
 }
 
-int atg_sqrt(tensor *out__, tensor self) {
+C_API int atg_sqrt(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::sqrt(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7640,7 +7640,7 @@ int atg_sqrt(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_sqrt_(tensor *out__, tensor self) {
+C_API int atg_sqrt_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::sqrt_(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7649,7 +7649,7 @@ int atg_sqrt_(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_sqrt_out(tensor *out__, tensor out, tensor self) {
+C_API int atg_sqrt_out(tensor *out__, tensor out, tensor self) {
 PROTECT(
 auto outputs__ = torch::sqrt_out(*out, *self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7658,7 +7658,7 @@ int atg_sqrt_out(tensor *out__, tensor out, tensor self) {
 return 1;
 }
 
-int atg_squeeze(tensor *out__, tensor self) {
+C_API int atg_squeeze(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = torch::squeeze(*self);
 out__[0] = new torch::Tensor(outputs__);
@@ -7667,7 +7667,7 @@ int atg_squeeze(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_squeeze1(tensor *out__, tensor self, int64_t dim) {
+C_API int atg_squeeze1(tensor *out__, tensor self, int64_t dim) {
 PROTECT(
 auto outputs__ = torch::squeeze(*self, dim);
 out__[0] = new torch::Tensor(outputs__);
@@ -7676,7 +7676,7 @@ int atg_squeeze1(tensor *out__, tensor self, int64_t dim) {
 return 1;
 }
 
-int atg_squeeze_(tensor *out__, tensor self) {
+C_API int atg_squeeze_(tensor *out__, tensor self) {
 PROTECT(
 auto outputs__ = self->squeeze_();
 out__[0] = new torch::Tensor(outputs__);
@@ -7685,7 +7685,7 @@ int atg_squeeze_(tensor *out__, tensor self) {
 return 1;
 }
 
-int atg_squeeze_1(tensor *out__, tensor self, int64_t dim) {
+C_API int atg_squeeze_1(tensor *out__, tensor self, int64_t dim) {
 PROTECT(
 auto outputs__ = self->squeeze_(dim);
 out__[0] = new torch::Tensor(outputs__);
@@ -7694,7 +7694,7 @@ int atg_squeeze_1(tensor *out__, tensor self, int64_t dim) {
 return 1;
 }
 
-int atg_sspaddmm(tensor *out__, tensor self, tensor mat1, tensor mat2) {
+C_API int atg_sspaddmm(tensor *out__, tensor self, tensor mat1, tensor mat2) {
 PROTECT(
 auto outputs__ = torch::sspaddmm(*self, *mat1, *mat2);
 out__[0] = new torch::Tensor(outputs__);
@@ -7703,7 +7703,7 @@ int atg_sspaddmm(tensor *out__, tensor self, tensor mat1, tensor mat2) {
 return 1;
 }
 
-int atg_sspaddmm_out(tensor *out__, tensor out, tensor self, tensor mat1, tensor mat2) {
+C_API int atg_sspaddmm_out(tensor *out__, tensor out, tensor self, tensor mat1, tensor mat2) {
 PROTECT(
 auto outputs__ = torch::sspaddmm_out(*out, *self, *mat1, *mat2);
 out__[0] = new torch::Tensor(outputs__);
@@ -7712,7 +7712,7 @@ int atg_sspaddmm_out(tensor *out__, tensor out, tensor self, tensor mat1, tensor
 return 1;
 }
 
-int atg_stack(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) {
+C_API int atg_stack(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim) {
 PROTECT(
 auto outputs__ = torch::stack(of_carray_tensor(tensors_data, tensors_len), dim);
 out__[0] = new torch::Tensor(outputs__);
@@ -7721,7 +7721,7 @@ int atg_stack(tensor *out__, tensor *tensors_data, int tensors_len, int64_t dim)
 return 1;
 }
 
-int atg_stack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len, int64_t dim) {
+C_API int atg_stack_out(tensor *out__, tensor out, tensor *tensors_data, int tensors_len, int64_t dim) {
 PROTECT(
 auto outputs__ = torch::stack_out(*out, of_carray_tensor(tensors_data, tensors_len), dim);
 out__[0] = new torch::Tensor(outputs__);
@@ -7730,7 +7730,7 @@ int atg_stack_out(tensor *out__, tensor out, tensor *tensors_l
 return 1;
 }
 
-int atg_std(tensor *out__, tensor self, int unbiased) {
+C_API int atg_std(tensor *out__, tensor self, int unbiased) {
 PROTECT(
 auto outputs__ = torch::std(*self, (bool)unbiased);
 out__[0] = new torch::Tensor(outputs__);
@@ -7739,7 +7739,7 @@ int atg_std(tensor
*out__, tensor self, int unbiased) { return 1; } -int atg_std1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { +C_API int atg_std1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { PROTECT( auto outputs__ = torch::std(*self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); @@ -7748,7 +7748,7 @@ int atg_std1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unb return 1; } -int atg_std_mean(tensor *out__, tensor self, int unbiased) { +C_API int atg_std_mean(tensor *out__, tensor self, int unbiased) { PROTECT( auto outputs__ = torch::std_mean(*self, (bool)unbiased); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -7758,7 +7758,7 @@ int atg_std_mean(tensor *out__, tensor self, int unbiased) { return 1; } -int atg_std_mean1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { +C_API int atg_std_mean1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { PROTECT( auto outputs__ = torch::std_mean(*self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -7768,7 +7768,7 @@ int atg_std_mean1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, in return 1; } -int atg_std_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { +C_API int atg_std_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { PROTECT( auto outputs__ = torch::std_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); @@ -7777,7 +7777,7 @@ int atg_std_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int d return 1; } -int atg_stft(tensor *out__, tensor self, int64_t n_fft, int64_t hop_length, int64_t win_length, tensor window, int normalized, int onesided) { +C_API int atg_stft(tensor *out__, tensor self, int64_t n_fft, int64_t hop_length, int64_t win_length, tensor window, int normalized, int onesided) { PROTECT( auto outputs__ = torch::stft(*self, n_fft, hop_length, win_length, (window ? 
*window : torch::Tensor()), (bool)normalized, (bool)onesided); out__[0] = new torch::Tensor(outputs__); @@ -7786,7 +7786,7 @@ int atg_stft(tensor *out__, tensor self, int64_t n_fft, int64_t hop_length, int6 return 1; } -int atg_sub(tensor *out__, tensor self, tensor other) { +C_API int atg_sub(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = torch::sub(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -7795,7 +7795,7 @@ int atg_sub(tensor *out__, tensor self, tensor other) { return 1; } -int atg_sub1(tensor *out__, tensor self, scalar other) { +C_API int atg_sub1(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = torch::sub(*self, *other); out__[0] = new torch::Tensor(outputs__); @@ -7804,7 +7804,7 @@ int atg_sub1(tensor *out__, tensor self, scalar other) { return 1; } -int atg_sub_(tensor *out__, tensor self, tensor other) { +C_API int atg_sub_(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->sub_(*other); out__[0] = new torch::Tensor(outputs__); @@ -7813,7 +7813,7 @@ int atg_sub_(tensor *out__, tensor self, tensor other) { return 1; } -int atg_sub_1(tensor *out__, tensor self, scalar other) { +C_API int atg_sub_1(tensor *out__, tensor self, scalar other) { PROTECT( auto outputs__ = self->sub_(*other); out__[0] = new torch::Tensor(outputs__); @@ -7822,7 +7822,7 @@ int atg_sub_1(tensor *out__, tensor self, scalar other) { return 1; } -int atg_sub_out(tensor *out__, tensor out, tensor self, tensor other) { +C_API int atg_sub_out(tensor *out__, tensor out, tensor self, tensor other) { PROTECT( auto outputs__ = torch::sub_out(*out, *self, *other); out__[0] = new torch::Tensor(outputs__); @@ -7831,7 +7831,7 @@ int atg_sub_out(tensor *out__, tensor out, tensor self, tensor other) { return 1; } -int atg_sum(tensor *out__, tensor self, int dtype) { +C_API int atg_sum(tensor *out__, tensor self, int dtype) { PROTECT( auto outputs__ = torch::sum(*self, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); @@ -7840,7 +7840,7 @@ int atg_sum(tensor *out__, tensor self, int dtype) { return 1; } -int atg_sum1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { +C_API int atg_sum1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( auto outputs__ = torch::sum(*self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); @@ -7849,7 +7849,7 @@ int atg_sum1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int kee return 1; } -int atg_sum_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { +C_API int atg_sum_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype) { PROTECT( auto outputs__ = torch::sum_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)keepdim, torch::ScalarType(dtype)); out__[0] = new torch::Tensor(outputs__); @@ -7858,7 +7858,7 @@ int atg_sum_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int d return 1; } -int atg_sum_to_size(tensor *out__, tensor self, int64_t *size_data, int size_len) { +C_API int atg_sum_to_size(tensor *out__, tensor self, int64_t *size_data, int size_len) { PROTECT( auto outputs__ = self->sum_to_size(torch::IntArrayRef(size_data, size_len)); out__[0] = new torch::Tensor(outputs__); @@ -7867,7 +7867,7 @@ int atg_sum_to_size(tensor *out__, tensor self, int64_t *size_data, int size_len return 1; } -int 
atg_svd(tensor *out__, tensor self, int some, int compute_uv) { +C_API int atg_svd(tensor *out__, tensor self, int some, int compute_uv) { PROTECT( auto outputs__ = torch::svd(*self, (bool)some, (bool)compute_uv); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -7878,7 +7878,7 @@ int atg_svd(tensor *out__, tensor self, int some, int compute_uv) { return 1; } -int atg_svd_out(tensor *out__, tensor U, tensor S, tensor V, tensor self, int some, int compute_uv) { +C_API int atg_svd_out(tensor *out__, tensor U, tensor S, tensor V, tensor self, int some, int compute_uv) { PROTECT( auto outputs__ = torch::svd_out(*U, *S, *V, *self, (bool)some, (bool)compute_uv); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -7889,7 +7889,7 @@ int atg_svd_out(tensor *out__, tensor U, tensor S, tensor V, tensor self, int so return 1; } -int atg_symeig(tensor *out__, tensor self, int eigenvectors, int upper) { +C_API int atg_symeig(tensor *out__, tensor self, int eigenvectors, int upper) { PROTECT( auto outputs__ = torch::symeig(*self, (bool)eigenvectors, (bool)upper); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -7899,7 +7899,7 @@ int atg_symeig(tensor *out__, tensor self, int eigenvectors, int upper) { return 1; } -int atg_symeig_out(tensor *out__, tensor e, tensor V, tensor self, int eigenvectors, int upper) { +C_API int atg_symeig_out(tensor *out__, tensor e, tensor V, tensor self, int eigenvectors, int upper) { PROTECT( auto outputs__ = torch::symeig_out(*e, *V, *self, (bool)eigenvectors, (bool)upper); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -7909,7 +7909,7 @@ int atg_symeig_out(tensor *out__, tensor e, tensor V, tensor self, int eigenvect return 1; } -int atg_t(tensor *out__, tensor self) { +C_API int atg_t(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::t(*self); out__[0] = new torch::Tensor(outputs__); @@ -7918,7 +7918,7 @@ int atg_t(tensor *out__, tensor self) { return 1; } -int atg_t_(tensor *out__, tensor self) { +C_API int atg_t_(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->t_(); out__[0] = new torch::Tensor(outputs__); @@ -7927,7 +7927,7 @@ int atg_t_(tensor *out__, tensor self) { return 1; } -int atg_take(tensor *out__, tensor self, tensor index) { +C_API int atg_take(tensor *out__, tensor self, tensor index) { PROTECT( auto outputs__ = torch::take(*self, *index); out__[0] = new torch::Tensor(outputs__); @@ -7936,7 +7936,7 @@ int atg_take(tensor *out__, tensor self, tensor index) { return 1; } -int atg_take_out(tensor *out__, tensor out, tensor self, tensor index) { +C_API int atg_take_out(tensor *out__, tensor out, tensor self, tensor index) { PROTECT( auto outputs__ = torch::take_out(*out, *self, *index); out__[0] = new torch::Tensor(outputs__); @@ -7945,7 +7945,7 @@ int atg_take_out(tensor *out__, tensor out, tensor self, tensor index) { return 1; } -int atg_tan(tensor *out__, tensor self) { +C_API int atg_tan(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::tan(*self); out__[0] = new torch::Tensor(outputs__); @@ -7954,7 +7954,7 @@ int atg_tan(tensor *out__, tensor self) { return 1; } -int atg_tan_(tensor *out__, tensor self) { +C_API int atg_tan_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::tan_(*self); out__[0] = new torch::Tensor(outputs__); @@ -7963,7 +7963,7 @@ int atg_tan_(tensor *out__, tensor self) { return 1; } -int atg_tan_out(tensor *out__, tensor out, tensor self) { +C_API int atg_tan_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::tan_out(*out, 
*self); out__[0] = new torch::Tensor(outputs__); @@ -7972,7 +7972,7 @@ int atg_tan_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_tanh(tensor *out__, tensor self) { +C_API int atg_tanh(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::tanh(*self); out__[0] = new torch::Tensor(outputs__); @@ -7981,7 +7981,7 @@ int atg_tanh(tensor *out__, tensor self) { return 1; } -int atg_tanh_(tensor *out__, tensor self) { +C_API int atg_tanh_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::tanh_(*self); out__[0] = new torch::Tensor(outputs__); @@ -7990,7 +7990,7 @@ int atg_tanh_(tensor *out__, tensor self) { return 1; } -int atg_tanh_backward(tensor *out__, tensor grad_output, tensor output) { +C_API int atg_tanh_backward(tensor *out__, tensor grad_output, tensor output) { PROTECT( auto outputs__ = torch::tanh_backward(*grad_output, *output); out__[0] = new torch::Tensor(outputs__); @@ -7999,7 +7999,7 @@ int atg_tanh_backward(tensor *out__, tensor grad_output, tensor output) { return 1; } -int atg_tanh_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor output) { +C_API int atg_tanh_backward_out(tensor *out__, tensor grad_input, tensor grad_output, tensor output) { PROTECT( auto outputs__ = torch::tanh_backward_out(*grad_input, *grad_output, *output); out__[0] = new torch::Tensor(outputs__); @@ -8008,7 +8008,7 @@ int atg_tanh_backward_out(tensor *out__, tensor grad_input, tensor grad_output, return 1; } -int atg_tanh_out(tensor *out__, tensor out, tensor self) { +C_API int atg_tanh_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::tanh_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -8017,7 +8017,7 @@ int atg_tanh_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_tensordot(tensor *out__, tensor self, tensor other, int64_t *dims_self_data, int dims_self_len, int64_t *dims_other_data, int dims_other_len) { +C_API int atg_tensordot(tensor *out__, tensor self, tensor other, int64_t *dims_self_data, int dims_self_len, int64_t *dims_other_data, int dims_other_len) { PROTECT( auto outputs__ = torch::tensordot(*self, *other, torch::IntArrayRef(dims_self_data, dims_self_len), torch::IntArrayRef(dims_other_data, dims_other_len)); out__[0] = new torch::Tensor(outputs__); @@ -8026,7 +8026,7 @@ int atg_tensordot(tensor *out__, tensor self, tensor other, int64_t *dims_self_d return 1; } -int atg_threshold(tensor *out__, tensor self, scalar threshold, scalar value) { +C_API int atg_threshold(tensor *out__, tensor self, scalar threshold, scalar value) { PROTECT( auto outputs__ = torch::threshold(*self, *threshold, *value); out__[0] = new torch::Tensor(outputs__); @@ -8035,7 +8035,7 @@ int atg_threshold(tensor *out__, tensor self, scalar threshold, scalar value) { return 1; } -int atg_threshold_(tensor *out__, tensor self, scalar threshold, scalar value) { +C_API int atg_threshold_(tensor *out__, tensor self, scalar threshold, scalar value) { PROTECT( auto outputs__ = torch::threshold_(*self, *threshold, *value); out__[0] = new torch::Tensor(outputs__); @@ -8044,7 +8044,7 @@ int atg_threshold_(tensor *out__, tensor self, scalar threshold, scalar value) { return 1; } -int atg_threshold_backward(tensor *out__, tensor grad_output, tensor self, scalar threshold) { +C_API int atg_threshold_backward(tensor *out__, tensor grad_output, tensor self, scalar threshold) { PROTECT( auto outputs__ = torch::threshold_backward(*grad_output, *self, *threshold); out__[0] = new torch::Tensor(outputs__); @@ 
-8053,7 +8053,7 @@ int atg_threshold_backward(tensor *out__, tensor grad_output, tensor self, scala return 1; } -int atg_threshold_out(tensor *out__, tensor out, tensor self, scalar threshold, scalar value) { +C_API int atg_threshold_out(tensor *out__, tensor out, tensor self, scalar threshold, scalar value) { PROTECT( auto outputs__ = torch::threshold_out(*out, *self, *threshold, *value); out__[0] = new torch::Tensor(outputs__); @@ -8062,7 +8062,7 @@ int atg_threshold_out(tensor *out__, tensor out, tensor self, scalar threshold, return 1; } -int atg_to(tensor *out__, tensor self, int device) { +C_API int atg_to(tensor *out__, tensor self, int device) { PROTECT( auto outputs__ = self->to(device_of_int(device)); // auto t = new torch::Tensor(outputs__); @@ -8072,7 +8072,7 @@ int atg_to(tensor *out__, tensor self, int device) { return 1; } -int atg_to1(tensor *out__, tensor self, int options_kind, int options_device, int non_blocking, int copy) { +C_API int atg_to1(tensor *out__, tensor self, int options_kind, int options_device, int non_blocking, int copy) { PROTECT( auto outputs__ = self->to(at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind)), (bool)non_blocking, (bool)copy); out__[0] = new torch::Tensor(outputs__); @@ -8081,7 +8081,7 @@ int atg_to1(tensor *out__, tensor self, int options_kind, int options_device, in return 1; } -int atg_to2(tensor *out__, tensor self, int dtype, int non_blocking, int copy) { +C_API int atg_to2(tensor *out__, tensor self, int dtype, int non_blocking, int copy) { PROTECT( auto outputs__ = self->to(torch::ScalarType(dtype), (bool)non_blocking, (bool)copy); out__[0] = new torch::Tensor(outputs__); @@ -8090,7 +8090,7 @@ int atg_to2(tensor *out__, tensor self, int dtype, int non_blocking, int copy) { return 1; } -int atg_to3(tensor *out__, tensor self, tensor other, int non_blocking, int copy) { +C_API int atg_to3(tensor *out__, tensor self, tensor other, int non_blocking, int copy) { PROTECT( auto outputs__ = self->to(*other, (bool)non_blocking, (bool)copy); out__[0] = new torch::Tensor(outputs__); @@ -8099,7 +8099,7 @@ int atg_to3(tensor *out__, tensor self, tensor other, int non_blocking, int copy return 1; } -int atg_to4(tensor *out__, tensor self, int device, int dtype, int non_blocking, int copy) { +C_API int atg_to4(tensor *out__, tensor self, int device, int dtype, int non_blocking, int copy) { PROTECT( auto outputs__ = self->to(device_of_int(device), torch::ScalarType(dtype), (bool)non_blocking, (bool)copy); out__[0] = new torch::Tensor(outputs__); @@ -8108,7 +8108,7 @@ int atg_to4(tensor *out__, tensor self, int device, int dtype, int non_blocking, return 1; } -int atg_to_dense(tensor *out__, tensor self) { +C_API int atg_to_dense(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->to_dense(); out__[0] = new torch::Tensor(outputs__); @@ -8117,7 +8117,7 @@ int atg_to_dense(tensor *out__, tensor self) { return 1; } -int atg_to_dense_backward(tensor *out__, tensor grad, tensor input) { +C_API int atg_to_dense_backward(tensor *out__, tensor grad, tensor input) { PROTECT( auto outputs__ = torch::to_dense_backward(*grad, *input); out__[0] = new torch::Tensor(outputs__); @@ -8126,7 +8126,7 @@ int atg_to_dense_backward(tensor *out__, tensor grad, tensor input) { return 1; } -int atg_to_mkldnn(tensor *out__, tensor self) { +C_API int atg_to_mkldnn(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->to_mkldnn(); out__[0] = new torch::Tensor(outputs__); @@ -8135,7 +8135,7 @@ int atg_to_mkldnn(tensor *out__, tensor 
self) { return 1; } -int atg_to_mkldnn_backward(tensor *out__, tensor grad, tensor input) { +C_API int atg_to_mkldnn_backward(tensor *out__, tensor grad, tensor input) { PROTECT( auto outputs__ = torch::to_mkldnn_backward(*grad, *input); out__[0] = new torch::Tensor(outputs__); @@ -8144,7 +8144,7 @@ int atg_to_mkldnn_backward(tensor *out__, tensor grad, tensor input) { return 1; } -int atg_to_sparse(tensor *out__, tensor self) { +C_API int atg_to_sparse(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->to_sparse(); out__[0] = new torch::Tensor(outputs__); @@ -8153,7 +8153,7 @@ int atg_to_sparse(tensor *out__, tensor self) { return 1; } -int atg_to_sparse1(tensor *out__, tensor self, int64_t sparse_dim) { +C_API int atg_to_sparse1(tensor *out__, tensor self, int64_t sparse_dim) { PROTECT( auto outputs__ = self->to_sparse(sparse_dim); out__[0] = new torch::Tensor(outputs__); @@ -8162,7 +8162,7 @@ int atg_to_sparse1(tensor *out__, tensor self, int64_t sparse_dim) { return 1; } -int atg_topk(tensor *out__, tensor self, int64_t k, int64_t dim, int largest, int sorted) { +C_API int atg_topk(tensor *out__, tensor self, int64_t k, int64_t dim, int largest, int sorted) { PROTECT( auto outputs__ = torch::topk(*self, k, dim, (bool)largest, (bool)sorted); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -8172,7 +8172,7 @@ int atg_topk(tensor *out__, tensor self, int64_t k, int64_t dim, int largest, in return 1; } -int atg_topk_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int largest, int sorted) { +C_API int atg_topk_out(tensor *out__, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int largest, int sorted) { PROTECT( auto outputs__ = torch::topk_out(*values, *indices, *self, k, dim, (bool)largest, (bool)sorted); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -8182,7 +8182,7 @@ int atg_topk_out(tensor *out__, tensor values, tensor indices, tensor self, int6 return 1; } -int atg_totype(tensor *out__, tensor self, int scalar_type) { +C_API int atg_totype(tensor *out__, tensor self, int scalar_type) { PROTECT( auto outputs__ = self->toType(torch::ScalarType(scalar_type)); out__[0] = new torch::Tensor(outputs__); @@ -8191,7 +8191,7 @@ int atg_totype(tensor *out__, tensor self, int scalar_type) { return 1; } -int atg_trace(tensor *out__, tensor self) { +C_API int atg_trace(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::trace(*self); out__[0] = new torch::Tensor(outputs__); @@ -8200,7 +8200,7 @@ int atg_trace(tensor *out__, tensor self) { return 1; } -int atg_transpose(tensor *out__, tensor self, int64_t dim0, int64_t dim1) { +C_API int atg_transpose(tensor *out__, tensor self, int64_t dim0, int64_t dim1) { PROTECT( auto outputs__ = torch::transpose(*self, dim0, dim1); out__[0] = new torch::Tensor(outputs__); @@ -8209,7 +8209,7 @@ int atg_transpose(tensor *out__, tensor self, int64_t dim0, int64_t dim1) { return 1; } -int atg_transpose_(tensor *out__, tensor self, int64_t dim0, int64_t dim1) { +C_API int atg_transpose_(tensor *out__, tensor self, int64_t dim0, int64_t dim1) { PROTECT( auto outputs__ = self->transpose_(dim0, dim1); out__[0] = new torch::Tensor(outputs__); @@ -8218,7 +8218,7 @@ int atg_transpose_(tensor *out__, tensor self, int64_t dim0, int64_t dim1) { return 1; } -int atg_trapz(tensor *out__, tensor y, tensor x, int64_t dim) { +C_API int atg_trapz(tensor *out__, tensor y, tensor x, int64_t dim) { PROTECT( auto outputs__ = torch::trapz(*y, *x, dim); out__[0] = new 
torch::Tensor(outputs__); @@ -8227,7 +8227,7 @@ int atg_trapz(tensor *out__, tensor y, tensor x, int64_t dim) { return 1; } -int atg_trapz1(tensor *out__, tensor y, double dx, int64_t dim) { +C_API int atg_trapz1(tensor *out__, tensor y, double dx, int64_t dim) { PROTECT( auto outputs__ = torch::trapz(*y, dx, dim); out__[0] = new torch::Tensor(outputs__); @@ -8236,7 +8236,7 @@ int atg_trapz1(tensor *out__, tensor y, double dx, int64_t dim) { return 1; } -int atg_triangular_solve(tensor *out__, tensor self, tensor A, int upper, int transpose, int unitriangular) { +C_API int atg_triangular_solve(tensor *out__, tensor self, tensor A, int upper, int transpose, int unitriangular) { PROTECT( auto outputs__ = torch::triangular_solve(*self, *A, (bool)upper, (bool)transpose, (bool)unitriangular); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -8246,7 +8246,7 @@ int atg_triangular_solve(tensor *out__, tensor self, tensor A, int upper, int tr return 1; } -int atg_triangular_solve_out(tensor *out__, tensor X, tensor M, tensor self, tensor A, int upper, int transpose, int unitriangular) { +C_API int atg_triangular_solve_out(tensor *out__, tensor X, tensor M, tensor self, tensor A, int upper, int transpose, int unitriangular) { PROTECT( auto outputs__ = torch::triangular_solve_out(*X, *M, *self, *A, (bool)upper, (bool)transpose, (bool)unitriangular); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -8256,7 +8256,7 @@ int atg_triangular_solve_out(tensor *out__, tensor X, tensor M, tensor self, ten return 1; } -int atg_tril(tensor *out__, tensor self, int64_t diagonal) { +C_API int atg_tril(tensor *out__, tensor self, int64_t diagonal) { PROTECT( auto outputs__ = torch::tril(*self, diagonal); out__[0] = new torch::Tensor(outputs__); @@ -8265,7 +8265,7 @@ int atg_tril(tensor *out__, tensor self, int64_t diagonal) { return 1; } -int atg_tril_(tensor *out__, tensor self, int64_t diagonal) { +C_API int atg_tril_(tensor *out__, tensor self, int64_t diagonal) { PROTECT( auto outputs__ = self->tril_(diagonal); out__[0] = new torch::Tensor(outputs__); @@ -8274,7 +8274,7 @@ int atg_tril_(tensor *out__, tensor self, int64_t diagonal) { return 1; } -int atg_tril_indices(tensor *out__, int64_t row, int64_t col, int64_t offset, int options_kind, int options_device) { +C_API int atg_tril_indices(tensor *out__, int64_t row, int64_t col, int64_t offset, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::tril_indices(row, col, offset, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -8283,7 +8283,7 @@ int atg_tril_indices(tensor *out__, int64_t row, int64_t col, int64_t offset, in return 1; } -int atg_tril_out(tensor *out__, tensor out, tensor self, int64_t diagonal) { +C_API int atg_tril_out(tensor *out__, tensor out, tensor self, int64_t diagonal) { PROTECT( auto outputs__ = torch::tril_out(*out, *self, diagonal); out__[0] = new torch::Tensor(outputs__); @@ -8292,7 +8292,7 @@ int atg_tril_out(tensor *out__, tensor out, tensor self, int64_t diagonal) { return 1; } -int atg_triplet_margin_loss(tensor *out__, tensor anchor, tensor positive, tensor negative, double margin, double p, double eps, int swap, int64_t reduction) { +C_API int atg_triplet_margin_loss(tensor *out__, tensor anchor, tensor positive, tensor negative, double margin, double p, double eps, int swap, int64_t reduction) { PROTECT( auto outputs__ = torch::triplet_margin_loss(*anchor, *positive, *negative, margin, p, eps, (bool)swap, reduction); 
out__[0] = new torch::Tensor(outputs__); @@ -8301,7 +8301,7 @@ int atg_triplet_margin_loss(tensor *out__, tensor anchor, tensor positive, tenso return 1; } -int atg_triu(tensor *out__, tensor self, int64_t diagonal) { +C_API int atg_triu(tensor *out__, tensor self, int64_t diagonal) { PROTECT( auto outputs__ = torch::triu(*self, diagonal); out__[0] = new torch::Tensor(outputs__); @@ -8310,7 +8310,7 @@ int atg_triu(tensor *out__, tensor self, int64_t diagonal) { return 1; } -int atg_triu_(tensor *out__, tensor self, int64_t diagonal) { +C_API int atg_triu_(tensor *out__, tensor self, int64_t diagonal) { PROTECT( auto outputs__ = self->triu_(diagonal); out__[0] = new torch::Tensor(outputs__); @@ -8319,7 +8319,7 @@ int atg_triu_(tensor *out__, tensor self, int64_t diagonal) { return 1; } -int atg_triu_indices(tensor *out__, int64_t row, int64_t col, int64_t offset, int options_kind, int options_device) { +C_API int atg_triu_indices(tensor *out__, int64_t row, int64_t col, int64_t offset, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::triu_indices(row, col, offset, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -8328,7 +8328,7 @@ int atg_triu_indices(tensor *out__, int64_t row, int64_t col, int64_t offset, in return 1; } -int atg_triu_out(tensor *out__, tensor out, tensor self, int64_t diagonal) { +C_API int atg_triu_out(tensor *out__, tensor out, tensor self, int64_t diagonal) { PROTECT( auto outputs__ = torch::triu_out(*out, *self, diagonal); out__[0] = new torch::Tensor(outputs__); @@ -8337,7 +8337,7 @@ int atg_triu_out(tensor *out__, tensor out, tensor self, int64_t diagonal) { return 1; } -int atg_trunc(tensor *out__, tensor self) { +C_API int atg_trunc(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::trunc(*self); out__[0] = new torch::Tensor(outputs__); @@ -8346,7 +8346,7 @@ int atg_trunc(tensor *out__, tensor self) { return 1; } -int atg_trunc_(tensor *out__, tensor self) { +C_API int atg_trunc_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::trunc_(*self); out__[0] = new torch::Tensor(outputs__); @@ -8355,7 +8355,7 @@ int atg_trunc_(tensor *out__, tensor self) { return 1; } -int atg_trunc_out(tensor *out__, tensor out, tensor self) { +C_API int atg_trunc_out(tensor *out__, tensor out, tensor self) { PROTECT( auto outputs__ = torch::trunc_out(*out, *self); out__[0] = new torch::Tensor(outputs__); @@ -8364,7 +8364,7 @@ int atg_trunc_out(tensor *out__, tensor out, tensor self) { return 1; } -int atg_type_as(tensor *out__, tensor self, tensor other) { +C_API int atg_type_as(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->type_as(*other); out__[0] = new torch::Tensor(outputs__); @@ -8373,7 +8373,7 @@ int atg_type_as(tensor *out__, tensor self, tensor other) { return 1; } -int atg_unbind(tensor *out__, tensor self, int64_t dim) { +C_API int atg_unbind(tensor *out__, tensor self, int64_t dim) { PROTECT( auto outputs__ = torch::unbind(*self, dim); int sz = outputs__.size(); @@ -8387,7 +8387,7 @@ int atg_unbind(tensor *out__, tensor self, int64_t dim) { return 1; } -int atg_unfold(tensor *out__, tensor self, int64_t dimension, int64_t size, int64_t step) { +C_API int atg_unfold(tensor *out__, tensor self, int64_t dimension, int64_t size, int64_t step) { PROTECT( auto outputs__ = self->unfold(dimension, size, step); out__[0] = new torch::Tensor(outputs__); @@ -8396,7 +8396,7 @@ int atg_unfold(tensor *out__, tensor self, int64_t 
dimension, int64_t size, int6 return 1; } -int atg_uniform_(tensor *out__, tensor self, double from, double to) { +C_API int atg_uniform_(tensor *out__, tensor self, double from, double to) { PROTECT( auto outputs__ = self->uniform_(from, to); out__[0] = new torch::Tensor(outputs__); @@ -8405,7 +8405,7 @@ int atg_uniform_(tensor *out__, tensor self, double from, double to) { return 1; } -int atg_unique_consecutive(tensor *out__, tensor self, int return_inverse, int return_counts, int64_t dim) { +C_API int atg_unique_consecutive(tensor *out__, tensor self, int return_inverse, int return_counts, int64_t dim) { PROTECT( auto outputs__ = torch::unique_consecutive(*self, (bool)return_inverse, (bool)return_counts, dim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -8416,7 +8416,7 @@ int atg_unique_consecutive(tensor *out__, tensor self, int return_inverse, int r return 1; } -int atg_unique_dim(tensor *out__, tensor self, int64_t dim, int sorted, int return_inverse, int return_counts) { +C_API int atg_unique_dim(tensor *out__, tensor self, int64_t dim, int sorted, int return_inverse, int return_counts) { PROTECT( auto outputs__ = torch::unique_dim(*self, dim, (bool)sorted, (bool)return_inverse, (bool)return_counts); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -8427,7 +8427,7 @@ int atg_unique_dim(tensor *out__, tensor self, int64_t dim, int sorted, int retu return 1; } -int atg_unique_dim_consecutive(tensor *out__, tensor self, int64_t dim, int return_inverse, int return_counts) { +C_API int atg_unique_dim_consecutive(tensor *out__, tensor self, int64_t dim, int return_inverse, int return_counts) { PROTECT( auto outputs__ = torch::unique_dim_consecutive(*self, dim, (bool)return_inverse, (bool)return_counts); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -8438,7 +8438,7 @@ int atg_unique_dim_consecutive(tensor *out__, tensor self, int64_t dim, int retu return 1; } -int atg_unsqueeze(tensor *out__, tensor self, int64_t dim) { +C_API int atg_unsqueeze(tensor *out__, tensor self, int64_t dim) { PROTECT( auto outputs__ = torch::unsqueeze(*self, dim); out__[0] = new torch::Tensor(outputs__); @@ -8447,7 +8447,7 @@ int atg_unsqueeze(tensor *out__, tensor self, int64_t dim) { return 1; } -int atg_unsqueeze_(tensor *out__, tensor self, int64_t dim) { +C_API int atg_unsqueeze_(tensor *out__, tensor self, int64_t dim) { PROTECT( auto outputs__ = self->unsqueeze_(dim); out__[0] = new torch::Tensor(outputs__); @@ -8456,7 +8456,7 @@ int atg_unsqueeze_(tensor *out__, tensor self, int64_t dim) { return 1; } -int atg_upsample_bicubic2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { +C_API int atg_upsample_bicubic2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { PROTECT( auto outputs__ = torch::upsample_bicubic2d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -8465,7 +8465,7 @@ int atg_upsample_bicubic2d(tensor *out__, tensor self, int64_t *output_size_data return 1; } -int atg_upsample_bicubic2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { +C_API int atg_upsample_bicubic2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { PROTECT( auto outputs__ = 
torch::upsample_bicubic2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -8474,7 +8474,7 @@ int atg_upsample_bicubic2d_backward(tensor *out__, tensor grad_output, int64_t * return 1; } -int atg_upsample_bicubic2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { +C_API int atg_upsample_bicubic2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { PROTECT( auto outputs__ = torch::upsample_bicubic2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -8483,7 +8483,7 @@ int atg_upsample_bicubic2d_backward_out(tensor *out__, tensor grad_input, tensor return 1; } -int atg_upsample_bicubic2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { +C_API int atg_upsample_bicubic2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { PROTECT( auto outputs__ = torch::upsample_bicubic2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -8492,7 +8492,7 @@ int atg_upsample_bicubic2d_out(tensor *out__, tensor out, tensor self, int64_t * return 1; } -int atg_upsample_bilinear2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { +C_API int atg_upsample_bilinear2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { PROTECT( auto outputs__ = torch::upsample_bilinear2d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -8501,7 +8501,7 @@ int atg_upsample_bilinear2d(tensor *out__, tensor self, int64_t *output_size_dat return 1; } -int atg_upsample_bilinear2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { +C_API int atg_upsample_bilinear2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { PROTECT( auto outputs__ = torch::upsample_bilinear2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -8510,7 +8510,7 @@ int atg_upsample_bilinear2d_backward(tensor *out__, tensor grad_output, int64_t return 1; } -int atg_upsample_bilinear2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { +C_API int atg_upsample_bilinear2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { PROTECT( auto outputs__ = torch::upsample_bilinear2d_backward_out(*grad_input, *grad_output, 
torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -8519,7 +8519,7 @@ int atg_upsample_bilinear2d_backward_out(tensor *out__, tensor grad_input, tenso return 1; } -int atg_upsample_bilinear2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { +C_API int atg_upsample_bilinear2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { PROTECT( auto outputs__ = torch::upsample_bilinear2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -8528,7 +8528,7 @@ int atg_upsample_bilinear2d_out(tensor *out__, tensor out, tensor self, int64_t return 1; } -int atg_upsample_linear1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { +C_API int atg_upsample_linear1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { PROTECT( auto outputs__ = torch::upsample_linear1d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -8537,7 +8537,7 @@ int atg_upsample_linear1d(tensor *out__, tensor self, int64_t *output_size_data, return 1; } -int atg_upsample_linear1d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { +C_API int atg_upsample_linear1d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { PROTECT( auto outputs__ = torch::upsample_linear1d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -8546,7 +8546,7 @@ int atg_upsample_linear1d_backward(tensor *out__, tensor grad_output, int64_t *o return 1; } -int atg_upsample_linear1d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { +C_API int atg_upsample_linear1d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { PROTECT( auto outputs__ = torch::upsample_linear1d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -8555,7 +8555,7 @@ int atg_upsample_linear1d_backward_out(tensor *out__, tensor grad_input, tensor return 1; } -int atg_upsample_linear1d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { +C_API int atg_upsample_linear1d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { PROTECT( auto outputs__ = torch::upsample_linear1d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -8564,7 +8564,7 @@ int atg_upsample_linear1d_out(tensor *out__, tensor out, tensor self, int64_t *o return 1; } -int 
atg_upsample_nearest1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_upsample_nearest1d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::upsample_nearest1d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -8573,7 +8573,7 @@ int atg_upsample_nearest1d(tensor *out__, tensor self, int64_t *output_size_data return 1; } -int atg_upsample_nearest1d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { +C_API int atg_upsample_nearest1d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { PROTECT( auto outputs__ = torch::upsample_nearest1d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -8582,7 +8582,7 @@ int atg_upsample_nearest1d_backward(tensor *out__, tensor grad_output, int64_t * return 1; } -int atg_upsample_nearest1d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { +C_API int atg_upsample_nearest1d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { PROTECT( auto outputs__ = torch::upsample_nearest1d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -8591,7 +8591,7 @@ int atg_upsample_nearest1d_backward_out(tensor *out__, tensor grad_input, tensor return 1; } -int atg_upsample_nearest1d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_upsample_nearest1d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::upsample_nearest1d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -8600,7 +8600,7 @@ int atg_upsample_nearest1d_out(tensor *out__, tensor out, tensor self, int64_t * return 1; } -int atg_upsample_nearest2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_upsample_nearest2d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::upsample_nearest2d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -8609,7 +8609,7 @@ int atg_upsample_nearest2d(tensor *out__, tensor self, int64_t *output_size_data return 1; } -int atg_upsample_nearest2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { +C_API int atg_upsample_nearest2d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { PROTECT( auto outputs__ = torch::upsample_nearest2d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -8618,7 +8618,7 @@ int 
atg_upsample_nearest2d_backward(tensor *out__, tensor grad_output, int64_t * return 1; } -int atg_upsample_nearest2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { +C_API int atg_upsample_nearest2d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { PROTECT( auto outputs__ = torch::upsample_nearest2d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -8627,7 +8627,7 @@ int atg_upsample_nearest2d_backward_out(tensor *out__, tensor grad_input, tensor return 1; } -int atg_upsample_nearest2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_upsample_nearest2d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::upsample_nearest2d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -8636,7 +8636,7 @@ int atg_upsample_nearest2d_out(tensor *out__, tensor out, tensor self, int64_t * return 1; } -int atg_upsample_nearest3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_upsample_nearest3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ = torch::upsample_nearest3d(*self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -8645,7 +8645,7 @@ int atg_upsample_nearest3d(tensor *out__, tensor self, int64_t *output_size_data return 1; } -int atg_upsample_nearest3d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { +C_API int atg_upsample_nearest3d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { PROTECT( auto outputs__ = torch::upsample_nearest3d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -8654,7 +8654,7 @@ int atg_upsample_nearest3d_backward(tensor *out__, tensor grad_output, int64_t * return 1; } -int atg_upsample_nearest3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { +C_API int atg_upsample_nearest3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len) { PROTECT( auto outputs__ = torch::upsample_nearest3d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -8663,7 +8663,7 @@ int atg_upsample_nearest3d_backward_out(tensor *out__, tensor grad_input, tensor return 1; } -int atg_upsample_nearest3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) { +C_API int atg_upsample_nearest3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len) { PROTECT( auto outputs__ 
= torch::upsample_nearest3d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len)); out__[0] = new torch::Tensor(outputs__); @@ -8672,7 +8672,7 @@ int atg_upsample_nearest3d_out(tensor *out__, tensor out, tensor self, int64_t * return 1; } -int atg_upsample_trilinear3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { +C_API int atg_upsample_trilinear3d(tensor *out__, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { PROTECT( auto outputs__ = torch::upsample_trilinear3d(*self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -8681,7 +8681,7 @@ int atg_upsample_trilinear3d(tensor *out__, tensor self, int64_t *output_size_da return 1; } -int atg_upsample_trilinear3d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { +C_API int atg_upsample_trilinear3d_backward(tensor *out__, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { PROTECT( auto outputs__ = torch::upsample_trilinear3d_backward(*grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -8690,7 +8690,7 @@ int atg_upsample_trilinear3d_backward(tensor *out__, tensor grad_output, int64_t return 1; } -int atg_upsample_trilinear3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { +C_API int atg_upsample_trilinear3d_backward_out(tensor *out__, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners) { PROTECT( auto outputs__ = torch::upsample_trilinear3d_backward_out(*grad_input, *grad_output, torch::IntArrayRef(output_size_data, output_size_len), torch::IntArrayRef(input_size_data, input_size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -8699,7 +8699,7 @@ int atg_upsample_trilinear3d_backward_out(tensor *out__, tensor grad_input, tens return 1; } -int atg_upsample_trilinear3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { +C_API int atg_upsample_trilinear3d_out(tensor *out__, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners) { PROTECT( auto outputs__ = torch::upsample_trilinear3d_out(*out, *self, torch::IntArrayRef(output_size_data, output_size_len), (bool)align_corners); out__[0] = new torch::Tensor(outputs__); @@ -8708,7 +8708,7 @@ int atg_upsample_trilinear3d_out(tensor *out__, tensor out, tensor self, int64_t return 1; } -int atg_values(tensor *out__, tensor self) { +C_API int atg_values(tensor *out__, tensor self) { PROTECT( auto outputs__ = self->values(); out__[0] = new torch::Tensor(outputs__); @@ -8717,7 +8717,7 @@ int atg_values(tensor *out__, tensor self) { return 1; } -int atg_var(tensor *out__, tensor self, int unbiased) { +C_API int atg_var(tensor *out__, tensor self, int unbiased) { PROTECT( auto outputs__ = torch::var(*self, (bool)unbiased); out__[0] = new torch::Tensor(outputs__); @@ -8726,7 +8726,7 @@ int atg_var(tensor *out__, tensor self, int unbiased) { return 1; } -int 
atg_var1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { +C_API int atg_var1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { PROTECT( auto outputs__ = torch::var(*self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); @@ -8735,7 +8735,7 @@ int atg_var1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unb return 1; } -int atg_var_mean(tensor *out__, tensor self, int unbiased) { +C_API int atg_var_mean(tensor *out__, tensor self, int unbiased) { PROTECT( auto outputs__ = torch::var_mean(*self, (bool)unbiased); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -8745,7 +8745,7 @@ int atg_var_mean(tensor *out__, tensor self, int unbiased) { return 1; } -int atg_var_mean1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { +C_API int atg_var_mean1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { PROTECT( auto outputs__ = torch::var_mean(*self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim); out__[0] = new torch::Tensor(std::get<0>(outputs__)); @@ -8755,7 +8755,7 @@ int atg_var_mean1(tensor *out__, tensor self, int64_t *dim_data, int dim_len, in return 1; } -int atg_var_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { +C_API int atg_var_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim) { PROTECT( auto outputs__ = torch::var_out(*out, *self, torch::IntArrayRef(dim_data, dim_len), (bool)unbiased, (bool)keepdim); out__[0] = new torch::Tensor(outputs__); @@ -8764,7 +8764,7 @@ int atg_var_out(tensor *out__, tensor out, tensor self, int64_t *dim_data, int d return 1; } -int atg_view(tensor *out__, tensor self, int64_t *size_data, int size_len) { +C_API int atg_view(tensor *out__, tensor self, int64_t *size_data, int size_len) { PROTECT( auto outputs__ = self->view(torch::IntArrayRef(size_data, size_len)); out__[0] = new torch::Tensor(outputs__); @@ -8773,7 +8773,7 @@ int atg_view(tensor *out__, tensor self, int64_t *size_data, int size_len) { return 1; } -int atg_view_as(tensor *out__, tensor self, tensor other) { +C_API int atg_view_as(tensor *out__, tensor self, tensor other) { PROTECT( auto outputs__ = self->view_as(*other); out__[0] = new torch::Tensor(outputs__); @@ -8782,7 +8782,7 @@ int atg_view_as(tensor *out__, tensor self, tensor other) { return 1; } -int atg_where(tensor *out__, tensor condition) { +C_API int atg_where(tensor *out__, tensor condition) { PROTECT( auto outputs__ = torch::where(*condition); int sz = outputs__.size(); @@ -8794,7 +8794,7 @@ int atg_where(tensor *out__, tensor condition) { return 1; } -int atg_where1(tensor *out__, tensor condition, tensor self, tensor other) { +C_API int atg_where1(tensor *out__, tensor condition, tensor self, tensor other) { PROTECT( auto outputs__ = torch::where(*condition, *self, *other); out__[0] = new torch::Tensor(outputs__); @@ -8803,7 +8803,7 @@ int atg_where1(tensor *out__, tensor condition, tensor self, tensor other) { return 1; } -int atg_zero_(tensor *out__, tensor self) { +C_API int atg_zero_(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::zero_(*self); out__[0] = new torch::Tensor(outputs__); @@ -8812,7 +8812,7 @@ int atg_zero_(tensor *out__, tensor self) { return 1; } -int atg_zeros(tensor *out__, int64_t *size_data, int size_len, int options_kind, 
int options_device) { +C_API int atg_zeros(tensor *out__, int64_t *size_data, int size_len, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::zeros(torch::IntArrayRef(size_data, size_len), at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -8821,7 +8821,7 @@ int atg_zeros(tensor *out__, int64_t *size_data, int size_len, int options_kind, return 1; } -int atg_zeros_like(tensor *out__, tensor self) { +C_API int atg_zeros_like(tensor *out__, tensor self) { PROTECT( auto outputs__ = torch::zeros_like(*self); out__[0] = new torch::Tensor(outputs__); @@ -8830,7 +8830,7 @@ int atg_zeros_like(tensor *out__, tensor self) { return 1; } -int atg_zeros_like1(tensor *out__, tensor self, int options_kind, int options_device) { +C_API int atg_zeros_like1(tensor *out__, tensor self, int options_kind, int options_device) { PROTECT( auto outputs__ = torch::zeros_like(*self, at::device(device_of_int(options_device)).dtype(at::ScalarType(options_kind))); out__[0] = new torch::Tensor(outputs__); @@ -8839,7 +8839,7 @@ int atg_zeros_like1(tensor *out__, tensor self, int options_kind, int options_de return 1; } -int atg_zeros_out(tensor *out__, tensor out, int64_t *size_data, int size_len) { +C_API int atg_zeros_out(tensor *out__, tensor out, int64_t *size_data, int size_len) { PROTECT( auto outputs__ = torch::zeros_out(*out, torch::IntArrayRef(size_data, size_len)); out__[0] = new torch::Tensor(outputs__); diff --git a/build/torch_api_generated.h b/build/torch_api_generated.h index d2f4e24a..95bcbcd8 100644 --- a/build/torch_api_generated.h +++ b/build/torch_api_generated.h @@ -1,976 +1,976 @@ // THIS FILE IS AUTOMATICALLY GENERATED, DO NOT EDIT BY HAND! -int atg_abs(tensor *, tensor self); -int atg_abs_(tensor *, tensor self); -int atg_abs_out(tensor *, tensor out, tensor self); -int atg_acos(tensor *, tensor self); -int atg_acos_(tensor *, tensor self); -int atg_acos_out(tensor *, tensor out, tensor self); -int atg_adaptive_avg_pool1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); -int atg_adaptive_avg_pool2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); -int atg_adaptive_avg_pool2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len); -int atg_adaptive_avg_pool3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); -int atg_adaptive_avg_pool3d_backward(tensor *, tensor grad_output, tensor self); -int atg_adaptive_avg_pool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self); -int atg_adaptive_avg_pool3d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len); -int atg_adaptive_max_pool1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); -int atg_adaptive_max_pool2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); -int atg_adaptive_max_pool2d_backward(tensor *, tensor grad_output, tensor self, tensor indices); -int atg_adaptive_max_pool2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor indices); -int atg_adaptive_max_pool2d_out(tensor *, tensor out, tensor indices, tensor self, int64_t *output_size_data, int output_size_len); -int atg_adaptive_max_pool3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); -int atg_adaptive_max_pool3d_backward(tensor *, tensor grad_output, tensor self, tensor indices); -int atg_adaptive_max_pool3d_backward_out(tensor *, tensor 
grad_input, tensor grad_output, tensor self, tensor indices); -int atg_adaptive_max_pool3d_out(tensor *, tensor out, tensor indices, tensor self, int64_t *output_size_data, int output_size_len); -int atg_add(tensor *, tensor self, tensor other); -int atg_add1(tensor *, tensor self, scalar other); -int atg_add_(tensor *, tensor self, tensor other); -int atg_add_1(tensor *, tensor self, scalar other); -int atg_add_out(tensor *, tensor out, tensor self, tensor other); -int atg_addbmm(tensor *, tensor self, tensor batch1, tensor batch2); -int atg_addbmm_(tensor *, tensor self, tensor batch1, tensor batch2); -int atg_addbmm_out(tensor *, tensor out, tensor self, tensor batch1, tensor batch2); -int atg_addcdiv(tensor *, tensor self, tensor tensor1, tensor tensor2); -int atg_addcdiv_(tensor *, tensor self, tensor tensor1, tensor tensor2); -int atg_addcdiv_out(tensor *, tensor out, tensor self, tensor tensor1, tensor tensor2); -int atg_addcmul(tensor *, tensor self, tensor tensor1, tensor tensor2); -int atg_addcmul_(tensor *, tensor self, tensor tensor1, tensor tensor2); -int atg_addcmul_out(tensor *, tensor out, tensor self, tensor tensor1, tensor tensor2); -int atg_addmm(tensor *, tensor self, tensor mat1, tensor mat2); -int atg_addmm_(tensor *, tensor self, tensor mat1, tensor mat2); -int atg_addmm_out(tensor *, tensor out, tensor self, tensor mat1, tensor mat2); -int atg_addmv(tensor *, tensor self, tensor mat, tensor vec); -int atg_addmv_(tensor *, tensor self, tensor mat, tensor vec); -int atg_addmv_out(tensor *, tensor out, tensor self, tensor mat, tensor vec); -int atg_addr(tensor *, tensor self, tensor vec1, tensor vec2); -int atg_addr_(tensor *, tensor self, tensor vec1, tensor vec2); -int atg_addr_out(tensor *, tensor out, tensor self, tensor vec1, tensor vec2); -int atg_affine_grid_generator(tensor *, tensor theta, int64_t *size_data, int size_len, int align_corners); -int atg_affine_grid_generator_backward(tensor *, tensor grad, int64_t *size_data, int size_len, int align_corners); -int atg_alias(tensor *, tensor self); -int atg_align_as(tensor *, tensor self, tensor other); +C_API int atg_abs(tensor *, tensor self); +C_API int atg_abs_(tensor *, tensor self); +C_API int atg_abs_out(tensor *, tensor out, tensor self); +C_API int atg_acos(tensor *, tensor self); +C_API int atg_acos_(tensor *, tensor self); +C_API int atg_acos_out(tensor *, tensor out, tensor self); +C_API int atg_adaptive_avg_pool1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_adaptive_avg_pool2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_adaptive_avg_pool2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_adaptive_avg_pool3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_adaptive_avg_pool3d_backward(tensor *, tensor grad_output, tensor self); +C_API int atg_adaptive_avg_pool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self); +C_API int atg_adaptive_avg_pool3d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_adaptive_max_pool1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_adaptive_max_pool2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_adaptive_max_pool2d_backward(tensor *, tensor grad_output, tensor self, tensor indices); +C_API int atg_adaptive_max_pool2d_backward_out(tensor *, 
tensor grad_input, tensor grad_output, tensor self, tensor indices); +C_API int atg_adaptive_max_pool2d_out(tensor *, tensor out, tensor indices, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_adaptive_max_pool3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_adaptive_max_pool3d_backward(tensor *, tensor grad_output, tensor self, tensor indices); +C_API int atg_adaptive_max_pool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor indices); +C_API int atg_adaptive_max_pool3d_out(tensor *, tensor out, tensor indices, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_add(tensor *, tensor self, tensor other); +C_API int atg_add1(tensor *, tensor self, scalar other); +C_API int atg_add_(tensor *, tensor self, tensor other); +C_API int atg_add_1(tensor *, tensor self, scalar other); +C_API int atg_add_out(tensor *, tensor out, tensor self, tensor other); +C_API int atg_addbmm(tensor *, tensor self, tensor batch1, tensor batch2); +C_API int atg_addbmm_(tensor *, tensor self, tensor batch1, tensor batch2); +C_API int atg_addbmm_out(tensor *, tensor out, tensor self, tensor batch1, tensor batch2); +C_API int atg_addcdiv(tensor *, tensor self, tensor tensor1, tensor tensor2); +C_API int atg_addcdiv_(tensor *, tensor self, tensor tensor1, tensor tensor2); +C_API int atg_addcdiv_out(tensor *, tensor out, tensor self, tensor tensor1, tensor tensor2); +C_API int atg_addcmul(tensor *, tensor self, tensor tensor1, tensor tensor2); +C_API int atg_addcmul_(tensor *, tensor self, tensor tensor1, tensor tensor2); +C_API int atg_addcmul_out(tensor *, tensor out, tensor self, tensor tensor1, tensor tensor2); +C_API int atg_addmm(tensor *, tensor self, tensor mat1, tensor mat2); +C_API int atg_addmm_(tensor *, tensor self, tensor mat1, tensor mat2); +C_API int atg_addmm_out(tensor *, tensor out, tensor self, tensor mat1, tensor mat2); +C_API int atg_addmv(tensor *, tensor self, tensor mat, tensor vec); +C_API int atg_addmv_(tensor *, tensor self, tensor mat, tensor vec); +C_API int atg_addmv_out(tensor *, tensor out, tensor self, tensor mat, tensor vec); +C_API int atg_addr(tensor *, tensor self, tensor vec1, tensor vec2); +C_API int atg_addr_(tensor *, tensor self, tensor vec1, tensor vec2); +C_API int atg_addr_out(tensor *, tensor out, tensor self, tensor vec1, tensor vec2); +C_API int atg_affine_grid_generator(tensor *, tensor theta, int64_t *size_data, int size_len, int align_corners); +C_API int atg_affine_grid_generator_backward(tensor *, tensor grad, int64_t *size_data, int size_len, int align_corners); +C_API int atg_alias(tensor *, tensor self); +C_API int atg_align_as(tensor *, tensor self, tensor other); // tensor *atg_align_tensors(tensor *tensors_data, int tensors_len); -int atg_align_tensors(tensor *, tensor *tensors_data, int tensors_len); -int atg_all(tensor *, tensor self); -int atg_all1(tensor *, tensor self, int64_t dim, int keepdim); -int atg_all_out(tensor *, tensor out, tensor self, int64_t dim, int keepdim); -int atg_alpha_dropout(tensor *, tensor input, double p, int train); -int atg_alpha_dropout_(tensor *, tensor self, double p, int train); -int atg_angle(tensor *, tensor self); -int atg_angle_out(tensor *, tensor out, tensor self); -int atg_any(tensor *, tensor self); -int atg_any1(tensor *, tensor self, int64_t dim, int keepdim); -int atg_any_out(tensor *, tensor out, tensor self, int64_t dim, int keepdim); -int atg_arange(tensor *, scalar end, int 
options_kind, int options_device); -int atg_arange1(tensor *, scalar start, scalar end, int options_kind, int options_device); -int atg_arange2(tensor *, scalar start, scalar end, scalar step, int options_kind, int options_device); -int atg_arange_out(tensor *, tensor out, scalar end); -int atg_arange_out1(tensor *, tensor out, scalar start, scalar end); -int atg_argmax(tensor *, tensor self, int64_t dim, int keepdim); -int atg_argmin(tensor *, tensor self, int64_t dim, int keepdim); -int atg_argsort(tensor *, tensor self, int64_t dim, int descending); -int atg_as_strided(tensor *, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset); -int atg_as_strided_(tensor *, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset); -int atg_asin(tensor *, tensor self); -int atg_asin_(tensor *, tensor self); -int atg_asin_out(tensor *, tensor out, tensor self); -int atg_atan(tensor *, tensor self); -int atg_atan2(tensor *, tensor self, tensor other); -int atg_atan2_(tensor *, tensor self, tensor other); -int atg_atan2_out(tensor *, tensor out, tensor self, tensor other); -int atg_atan_(tensor *, tensor self); -int atg_atan_out(tensor *, tensor out, tensor self); -int atg_avg_pool1d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad); -int atg_avg_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); -int atg_avg_pool2d_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); -int atg_avg_pool2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); -int atg_avg_pool2d_out(tensor *, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); -int atg_avg_pool3d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); -int atg_avg_pool3d_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); -int atg_avg_pool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); -int atg_avg_pool3d_out(tensor *, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); 
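/* The declarations removed in this hunk and the C_API-decorated ones added
 * below differ only in the export decorator. A minimal sketch of what such a
 * C_API macro typically expands to -- hedged: the patch's actual definition
 * is presumably made once in torch_api.h and is not shown in this hunk, and
 * LTORCH_EXPORTS is a hypothetical build-time define, not taken from this
 * patch:
 */
#if defined(_WIN32)
#  if defined(LTORCH_EXPORTS)
#    define C_API __declspec(dllexport)   /* building the DLL: export symbols */
#  else
#    define C_API __declspec(dllimport)   /* consuming the DLL: import symbols */
#  endif
#else
#  define C_API __attribute__((visibility("default")))   /* ELF/Mach-O builds */
#endif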
-int atg_baddbmm(tensor *, tensor self, tensor batch1, tensor batch2); -int atg_baddbmm_(tensor *, tensor self, tensor batch1, tensor batch2); -int atg_baddbmm_out(tensor *, tensor out, tensor self, tensor batch1, tensor batch2); -int atg_bartlett_window(tensor *, int64_t window_length, int options_kind, int options_device); -int atg_bartlett_window1(tensor *, int64_t window_length, int periodic, int options_kind, int options_device); -int atg_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps, int cudnn_enabled); -int atg_batch_norm_backward_elemt(tensor *, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, tensor mean_dy, tensor mean_dy_xmu); -int atg_batch_norm_backward_reduce(tensor *, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, int input_g, int weight_g, int bias_g); -int atg_batch_norm_elemt(tensor *, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps); -int atg_batch_norm_elemt_out(tensor *, tensor out, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps); -int atg_batch_norm_gather_stats(tensor *, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t count); -int atg_batch_norm_gather_stats_with_counts(tensor *, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t *counts_data, int counts_len); -int atg_batch_norm_stats(tensor *, tensor input, double eps); -int atg_batch_norm_update_stats(tensor *, tensor input, tensor running_mean, tensor running_var, double momentum); -int atg_bernoulli(tensor *, tensor self); -int atg_bernoulli1(tensor *, tensor self, double p); -int atg_bernoulli_(tensor *, tensor self, tensor p); -int atg_bernoulli_1(tensor *, tensor self, double p); -int atg_bernoulli_out(tensor *, tensor out, tensor self); -int atg_bilinear(tensor *, tensor input1, tensor input2, tensor weight, tensor bias); -int atg_binary_cross_entropy(tensor *, tensor self, tensor target, tensor weight, int64_t reduction); -int atg_binary_cross_entropy_backward(tensor *, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction); -int atg_binary_cross_entropy_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction); -int atg_binary_cross_entropy_out(tensor *, tensor out, tensor self, tensor target, tensor weight, int64_t reduction); -int atg_binary_cross_entropy_with_logits(tensor *, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction); -int atg_binary_cross_entropy_with_logits_backward(tensor *, tensor grad_output, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction); -int atg_bincount(tensor *, tensor self, tensor weights, int64_t minlength); -int atg_bitwise_not(tensor *, tensor self); -int atg_bitwise_not_(tensor *, tensor self); -int atg_bitwise_not_out(tensor *, tensor out, tensor self); -int atg_bitwise_xor(tensor *, tensor self, scalar other); -int atg_bitwise_xor1(tensor *, tensor self, tensor other); -int atg_bitwise_xor_(tensor *, tensor self, scalar other); -int atg_bitwise_xor_1(tensor *, tensor self, tensor other); -int atg_bitwise_xor_out(tensor *, tensor out, tensor self, tensor other); -int atg_bitwise_xor_out1(tensor *, tensor out, tensor self, scalar other); -int atg_blackman_window(tensor *, 
int64_t window_length, int options_kind, int options_device); -int atg_blackman_window1(tensor *, int64_t window_length, int periodic, int options_kind, int options_device); -int atg_bmm(tensor *, tensor self, tensor mat2); -int atg_bmm_out(tensor *, tensor out, tensor self, tensor mat2); +C_API int atg_align_tensors(tensor *, tensor *tensors_data, int tensors_len); +C_API int atg_all(tensor *, tensor self); +C_API int atg_all1(tensor *, tensor self, int64_t dim, int keepdim); +C_API int atg_all_out(tensor *, tensor out, tensor self, int64_t dim, int keepdim); +C_API int atg_alpha_dropout(tensor *, tensor input, double p, int train); +C_API int atg_alpha_dropout_(tensor *, tensor self, double p, int train); +C_API int atg_angle(tensor *, tensor self); +C_API int atg_angle_out(tensor *, tensor out, tensor self); +C_API int atg_any(tensor *, tensor self); +C_API int atg_any1(tensor *, tensor self, int64_t dim, int keepdim); +C_API int atg_any_out(tensor *, tensor out, tensor self, int64_t dim, int keepdim); +C_API int atg_arange(tensor *, scalar end, int options_kind, int options_device); +C_API int atg_arange1(tensor *, scalar start, scalar end, int options_kind, int options_device); +C_API int atg_arange2(tensor *, scalar start, scalar end, scalar step, int options_kind, int options_device); +C_API int atg_arange_out(tensor *, tensor out, scalar end); +C_API int atg_arange_out1(tensor *, tensor out, scalar start, scalar end); +C_API int atg_argmax(tensor *, tensor self, int64_t dim, int keepdim); +C_API int atg_argmin(tensor *, tensor self, int64_t dim, int keepdim); +C_API int atg_argsort(tensor *, tensor self, int64_t dim, int descending); +C_API int atg_as_strided(tensor *, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset); +C_API int atg_as_strided_(tensor *, tensor self, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int64_t storage_offset); +C_API int atg_asin(tensor *, tensor self); +C_API int atg_asin_(tensor *, tensor self); +C_API int atg_asin_out(tensor *, tensor out, tensor self); +C_API int atg_atan(tensor *, tensor self); +C_API int atg_atan2(tensor *, tensor self, tensor other); +C_API int atg_atan2_(tensor *, tensor self, tensor other); +C_API int atg_atan2_out(tensor *, tensor out, tensor self, tensor other); +C_API int atg_atan_(tensor *, tensor self); +C_API int atg_atan_out(tensor *, tensor out, tensor self); +C_API int atg_avg_pool1d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad); +C_API int atg_avg_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); +C_API int atg_avg_pool2d_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); +C_API int atg_avg_pool2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); +C_API int atg_avg_pool2d_out(tensor *, tensor out, tensor self, int64_t 
*kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); +C_API int atg_avg_pool3d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); +C_API int atg_avg_pool3d_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); +C_API int atg_avg_pool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); +C_API int atg_avg_pool3d_out(tensor *, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int ceil_mode, int count_include_pad, int64_t divisor_override); +C_API int atg_baddbmm(tensor *, tensor self, tensor batch1, tensor batch2); +C_API int atg_baddbmm_(tensor *, tensor self, tensor batch1, tensor batch2); +C_API int atg_baddbmm_out(tensor *, tensor out, tensor self, tensor batch1, tensor batch2); +C_API int atg_bartlett_window(tensor *, int64_t window_length, int options_kind, int options_device); +C_API int atg_bartlett_window1(tensor *, int64_t window_length, int periodic, int options_kind, int options_device); +C_API int atg_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps, int cudnn_enabled); +C_API int atg_batch_norm_backward_elemt(tensor *, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, tensor mean_dy, tensor mean_dy_xmu); +C_API int atg_batch_norm_backward_reduce(tensor *, tensor grad_out, tensor input, tensor mean, tensor invstd, tensor weight, int input_g, int weight_g, int bias_g); +C_API int atg_batch_norm_elemt(tensor *, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps); +C_API int atg_batch_norm_elemt_out(tensor *, tensor out, tensor input, tensor weight, tensor bias, tensor mean, tensor invstd, double eps); +C_API int atg_batch_norm_gather_stats(tensor *, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t count); +C_API int atg_batch_norm_gather_stats_with_counts(tensor *, tensor input, tensor mean, tensor invstd, tensor running_mean, tensor running_var, double momentum, double eps, int64_t *counts_data, int counts_len); +C_API int atg_batch_norm_stats(tensor *, tensor input, double eps); +C_API int atg_batch_norm_update_stats(tensor *, tensor input, tensor running_mean, tensor running_var, double momentum); +C_API int atg_bernoulli(tensor *, tensor self); +C_API int atg_bernoulli1(tensor *, tensor self, double p); +C_API int atg_bernoulli_(tensor *, tensor self, tensor p); +C_API int atg_bernoulli_1(tensor *, tensor self, double p); +C_API int atg_bernoulli_out(tensor *, tensor out, tensor self); +C_API int atg_bilinear(tensor *, tensor input1, tensor input2, tensor weight, tensor bias); +C_API int atg_binary_cross_entropy(tensor *, tensor self, tensor target, tensor weight, int64_t 
reduction); +C_API int atg_binary_cross_entropy_backward(tensor *, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction); +C_API int atg_binary_cross_entropy_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction); +C_API int atg_binary_cross_entropy_out(tensor *, tensor out, tensor self, tensor target, tensor weight, int64_t reduction); +C_API int atg_binary_cross_entropy_with_logits(tensor *, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction); +C_API int atg_binary_cross_entropy_with_logits_backward(tensor *, tensor grad_output, tensor self, tensor target, tensor weight, tensor pos_weight, int64_t reduction); +C_API int atg_bincount(tensor *, tensor self, tensor weights, int64_t minlength); +C_API int atg_bitwise_not(tensor *, tensor self); +C_API int atg_bitwise_not_(tensor *, tensor self); +C_API int atg_bitwise_not_out(tensor *, tensor out, tensor self); +C_API int atg_bitwise_xor(tensor *, tensor self, scalar other); +C_API int atg_bitwise_xor1(tensor *, tensor self, tensor other); +C_API int atg_bitwise_xor_(tensor *, tensor self, scalar other); +C_API int atg_bitwise_xor_1(tensor *, tensor self, tensor other); +C_API int atg_bitwise_xor_out(tensor *, tensor out, tensor self, tensor other); +C_API int atg_bitwise_xor_out1(tensor *, tensor out, tensor self, scalar other); +C_API int atg_blackman_window(tensor *, int64_t window_length, int options_kind, int options_device); +C_API int atg_blackman_window1(tensor *, int64_t window_length, int periodic, int options_kind, int options_device); +C_API int atg_bmm(tensor *, tensor self, tensor mat2); +C_API int atg_bmm_out(tensor *, tensor out, tensor self, tensor mat2); // tensor *atg_broadcast_tensors(tensor *tensors_data, int tensors_len); -int atg_broadcast_tensors(tensor *, tensor *tensors_data, int tensors_len); -int atg_cartesian_prod(tensor *, tensor *tensors_data, int tensors_len); -int atg_cat(tensor *, tensor *tensors_data, int tensors_len, int64_t dim); -int atg_cat_out(tensor *, tensor out, tensor *tensors_data, int tensors_len, int64_t dim); -int atg_cauchy_(tensor *, tensor self, double median, double sigma); -int atg_cdist(tensor *, tensor x1, tensor x2, double p, int64_t compute_mode); -int atg_ceil(tensor *, tensor self); -int atg_ceil_(tensor *, tensor self); -int atg_ceil_out(tensor *, tensor out, tensor self); -int atg_celu(tensor *, tensor self); -int atg_celu_(tensor *, tensor self); -int atg_chain_matmul(tensor *, tensor *matrices_data, int matrices_len); -int atg_cholesky(tensor *, tensor self, int upper); -int atg_cholesky_inverse(tensor *, tensor self, int upper); -int atg_cholesky_inverse_out(tensor *, tensor out, tensor self, int upper); -int atg_cholesky_out(tensor *, tensor out, tensor self, int upper); -int atg_cholesky_solve(tensor *, tensor self, tensor input2, int upper); -int atg_cholesky_solve_out(tensor *, tensor out, tensor self, tensor input2, int upper); +C_API int atg_broadcast_tensors(tensor *, tensor *tensors_data, int tensors_len); +C_API int atg_cartesian_prod(tensor *, tensor *tensors_data, int tensors_len); +C_API int atg_cat(tensor *, tensor *tensors_data, int tensors_len, int64_t dim); +C_API int atg_cat_out(tensor *, tensor out, tensor *tensors_data, int tensors_len, int64_t dim); +C_API int atg_cauchy_(tensor *, tensor self, double median, double sigma); +C_API int atg_cdist(tensor *, tensor x1, tensor x2, double p, int64_t compute_mode); +C_API int atg_ceil(tensor 
*, tensor self); +C_API int atg_ceil_(tensor *, tensor self); +C_API int atg_ceil_out(tensor *, tensor out, tensor self); +C_API int atg_celu(tensor *, tensor self); +C_API int atg_celu_(tensor *, tensor self); +C_API int atg_chain_matmul(tensor *, tensor *matrices_data, int matrices_len); +C_API int atg_cholesky(tensor *, tensor self, int upper); +C_API int atg_cholesky_inverse(tensor *, tensor self, int upper); +C_API int atg_cholesky_inverse_out(tensor *, tensor out, tensor self, int upper); +C_API int atg_cholesky_out(tensor *, tensor out, tensor self, int upper); +C_API int atg_cholesky_solve(tensor *, tensor self, tensor input2, int upper); +C_API int atg_cholesky_solve_out(tensor *, tensor out, tensor self, tensor input2, int upper); // tensor *atg_chunk(tensor self, int64_t chunks, int64_t dim); -int atg_chunk(tensor *, tensor self, int64_t chunks, int64_t dim); -int atg_clamp(tensor *, tensor self, scalar min, scalar max); -int atg_clamp_(tensor *, tensor self, scalar min, scalar max); -int atg_clamp_max(tensor *, tensor self, scalar max); -int atg_clamp_max_(tensor *, tensor self, scalar max); -int atg_clamp_max_out(tensor *, tensor out, tensor self, scalar max); -int atg_clamp_min(tensor *, tensor self, scalar min); -int atg_clamp_min_(tensor *, tensor self, scalar min); -int atg_clamp_min_out(tensor *, tensor out, tensor self, scalar min); -int atg_clamp_out(tensor *, tensor out, tensor self, scalar min, scalar max); -int atg_clone(tensor *, tensor self); -int atg_coalesce(tensor *, tensor self); -int atg_col2im(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); -int atg_col2im_backward(tensor *, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); -int atg_col2im_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); -int atg_col2im_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); -int atg_combinations(tensor *, tensor self, int64_t r, int with_replacement); -int atg_conj(tensor *, tensor self); -int atg_conj_out(tensor *, tensor out, tensor self); -int atg_constant_pad_nd(tensor *, tensor self, int64_t *pad_data, int pad_len); -int atg_contiguous(tensor *, tensor self); -int atg_conv1d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups); -int atg_conv2d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups); -int atg_conv3d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups); -int atg_conv_tbc(tensor *, tensor self, tensor weight, tensor bias, int64_t pad); -int 
atg_conv_tbc_backward(tensor *, tensor self, tensor input, tensor weight, tensor bias, int64_t pad); -int atg_conv_transpose1d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len); -int atg_conv_transpose2d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len); -int atg_conv_transpose3d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len); -int atg_convolution(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups); -int atg_convolution_overrideable(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups); -int atg_copy_sparse_to_sparse_(tensor *, tensor self, tensor src, int non_blocking); -int atg_cos(tensor *, tensor self); -int atg_cos_(tensor *, tensor self); -int atg_cos_out(tensor *, tensor out, tensor self); -int atg_cosh(tensor *, tensor self); -int atg_cosh_(tensor *, tensor self); -int atg_cosh_out(tensor *, tensor out, tensor self); -int atg_cosine_embedding_loss(tensor *, tensor input1, tensor input2, tensor target, double margin, int64_t reduction); -int atg_cosine_similarity(tensor *, tensor x1, tensor x2, int64_t dim, double eps); -int atg_cross(tensor *, tensor self, tensor other, int64_t dim); -int atg_cross_out(tensor *, tensor out, tensor self, tensor other, int64_t dim); -int atg_ctc_loss(tensor *, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank, int64_t reduction, int zero_infinity); -int atg_ctc_loss1(tensor *, tensor log_probs, tensor targets, tensor input_lengths, tensor target_lengths, int64_t blank, int64_t reduction, int zero_infinity); -int atg_cudnn_affine_grid_generator(tensor *, tensor theta, int64_t n, int64_t C, int64_t H, int64_t W); -int atg_cudnn_affine_grid_generator_backward(tensor *, tensor grad, int64_t n, int64_t C, int64_t H, int64_t W); -int atg_cudnn_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon); -int atg_cudnn_batch_norm_backward(tensor *, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon, tensor reserveSpace); -int atg_cudnn_convolution(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -int atg_cudnn_convolution_backward_bias(tensor *, tensor grad_output); -int atg_cudnn_convolution_backward_input(tensor *, 
int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -int atg_cudnn_convolution_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -int atg_cudnn_convolution_transpose(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -int atg_cudnn_convolution_transpose_backward_bias(tensor *, tensor grad_output); -int atg_cudnn_convolution_transpose_backward_input(tensor *, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -int atg_cudnn_convolution_transpose_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -int atg_cudnn_grid_sampler(tensor *, tensor self, tensor grid); -int atg_cudnn_grid_sampler_backward(tensor *, tensor self, tensor grid, tensor grad_output); -int atg_cumprod(tensor *, tensor self, int64_t dim, int dtype); -int atg_cumprod_out(tensor *, tensor out, tensor self, int64_t dim, int dtype); -int atg_cumsum(tensor *, tensor self, int64_t dim, int dtype); -int atg_cumsum_out(tensor *, tensor out, tensor self, int64_t dim, int dtype); -int atg_data(tensor *, tensor self); -int atg_dequantize(tensor *, tensor self); -int atg_det(tensor *, tensor self); -int atg_detach(tensor *, tensor self); -int atg_detach_(tensor *, tensor self); -int atg_diag(tensor *, tensor self, int64_t diagonal); -int atg_diag_embed(tensor *, tensor self, int64_t offset, int64_t dim1, int64_t dim2); -int atg_diag_out(tensor *, tensor out, tensor self, int64_t diagonal); -int atg_diagflat(tensor *, tensor self, int64_t offset); -int atg_diagonal(tensor *, tensor self, int64_t offset, int64_t dim1, int64_t dim2); -int atg_digamma(tensor *, tensor self); -int atg_digamma_(tensor *, tensor self); -int atg_digamma_out(tensor *, tensor out, tensor self); -int atg_dist(tensor *, tensor self, tensor other); -int atg_div(tensor *, tensor self, tensor other); -int atg_div1(tensor *, tensor self, scalar other); -int atg_div_(tensor *, tensor self, tensor other); -int atg_div_1(tensor *, tensor self, scalar other); -int atg_div_out(tensor *, tensor out, tensor self, tensor other); -int atg_dot(tensor *, tensor self, tensor tensor); -int atg_dot_out(tensor *, tensor out, tensor self, tensor tensor); -int atg_dropout(tensor *, tensor input, double p, int train); -int atg_dropout_(tensor *, tensor self, double p, int train); -int atg_eig(tensor *, tensor self, int eigenvectors); -int atg_eig_out(tensor *, tensor e, tensor v, tensor self, int eigenvectors); -int atg_elu(tensor *, tensor self); -int atg_elu_(tensor *, tensor self); -int atg_elu_backward(tensor *, tensor grad_output, scalar alpha, scalar scale, scalar 
input_scale, tensor output); -int atg_elu_backward_out(tensor *, tensor grad_input, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, tensor output); -int atg_elu_out(tensor *, tensor out, tensor self); -int atg_embedding(tensor *, tensor weight, tensor indices, int64_t padding_idx, int scale_grad_by_freq, int sparse); -int atg_embedding_backward(tensor *, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq, int sparse); -int atg_embedding_bag(tensor *, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights); -int atg_embedding_dense_backward(tensor *, tensor grad_output, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq); -int atg_embedding_renorm_(tensor *, tensor self, tensor indices, double max_norm, double norm_type); -int atg_embedding_sparse_backward(tensor *, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq); -int atg_empty(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device); -int atg_empty_like(tensor *, tensor self); -int atg_empty_like1(tensor *, tensor self, int options_kind, int options_device); -int atg_empty_out(tensor *, tensor out, int64_t *size_data, int size_len); -int atg_empty_strided(tensor *, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int options_kind, int options_device); -int atg_eq(tensor *, tensor self, scalar other); -int atg_eq1(tensor *, tensor self, tensor other); -int atg_eq_(tensor *, tensor self, scalar other); -int atg_eq_1(tensor *, tensor self, tensor other); -int atg_eq_out(tensor *, tensor out, tensor self, scalar other); -int atg_eq_out1(tensor *, tensor out, tensor self, tensor other); -int atg_erf(tensor *, tensor self); -int atg_erf_(tensor *, tensor self); -int atg_erf_out(tensor *, tensor out, tensor self); -int atg_erfc(tensor *, tensor self); -int atg_erfc_(tensor *, tensor self); -int atg_erfc_out(tensor *, tensor out, tensor self); -int atg_erfinv(tensor *, tensor self); -int atg_erfinv_(tensor *, tensor self); -int atg_erfinv_out(tensor *, tensor out, tensor self); -int atg_exp(tensor *, tensor self); -int atg_exp_(tensor *, tensor self); -int atg_exp_out(tensor *, tensor out, tensor self); -int atg_expand(tensor *, tensor self, int64_t *size_data, int size_len, int implicit); -int atg_expand_as(tensor *, tensor self, tensor other); -int atg_expm1(tensor *, tensor self); -int atg_expm1_(tensor *, tensor self); -int atg_expm1_out(tensor *, tensor out, tensor self); -int atg_exponential_(tensor *, tensor self, double lambd); -int atg_eye(tensor *, int64_t n, int options_kind, int options_device); -int atg_eye1(tensor *, int64_t n, int64_t m, int options_kind, int options_device); -int atg_eye_out(tensor *, tensor out, int64_t n); -int atg_eye_out1(tensor *, tensor out, int64_t n, int64_t m); -int atg_fake_quantize_per_channel_affine(tensor *, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); -int atg_fake_quantize_per_channel_affine_backward(tensor *, tensor grad, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); -int atg_fake_quantize_per_tensor_affine(tensor *, tensor self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max); -int atg_fake_quantize_per_tensor_affine_backward(tensor *, tensor grad, tensor self, double scale, int64_t zero_point, int64_t quant_min, 
int64_t quant_max); -int atg_fbgemm_linear_fp16_weight(tensor *, tensor input, tensor packed_weight, tensor bias); -int atg_fbgemm_linear_fp16_weight_fp32_activation(tensor *, tensor input, tensor packed_weight, tensor bias); -int atg_fbgemm_linear_int8_weight(tensor *, tensor input, tensor weight, tensor packed, tensor col_offsets, scalar weight_scale, scalar weight_zero_point, tensor bias); -int atg_fbgemm_linear_int8_weight_fp32_activation(tensor *, tensor input, tensor weight, tensor packed, tensor col_offsets, scalar weight_scale, scalar weight_zero_point, tensor bias); -int atg_fbgemm_pack_gemm_matrix_fp16(tensor *, tensor input); -int atg_fbgemm_pack_quantized_matrix(tensor *, tensor input); -int atg_fbgemm_pack_quantized_matrix1(tensor *, tensor input, int64_t K, int64_t n); -int atg_feature_alpha_dropout(tensor *, tensor input, double p, int train); -int atg_feature_alpha_dropout_(tensor *, tensor self, double p, int train); -int atg_feature_dropout(tensor *, tensor input, double p, int train); -int atg_feature_dropout_(tensor *, tensor self, double p, int train); -int atg_fft(tensor *, tensor self, int64_t signal_ndim, int normalized); -int atg_fill_(tensor *, tensor self, scalar value); -int atg_fill_1(tensor *, tensor self, tensor value); -int atg_fill_diagonal_(tensor *, tensor self, scalar fill_value, int wrap); -int atg_flatten(tensor *, tensor self, int64_t start_dim, int64_t end_dim); -int atg_flip(tensor *, tensor self, int64_t *dims_data, int dims_len); -int atg_floor(tensor *, tensor self); -int atg_floor_(tensor *, tensor self); -int atg_floor_out(tensor *, tensor out, tensor self); -int atg_fmod(tensor *, tensor self, scalar other); -int atg_fmod1(tensor *, tensor self, tensor other); -int atg_fmod_(tensor *, tensor self, scalar other); -int atg_fmod_1(tensor *, tensor self, tensor other); -int atg_fmod_out(tensor *, tensor out, tensor self, scalar other); -int atg_fmod_out1(tensor *, tensor out, tensor self, tensor other); -int atg_frac(tensor *, tensor self); -int atg_frac_(tensor *, tensor self); -int atg_frac_out(tensor *, tensor out, tensor self); -int atg_fractional_max_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples); -int atg_fractional_max_pool2d_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices); -int atg_fractional_max_pool2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices); -int atg_fractional_max_pool2d_out(tensor *, tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples); -int atg_fractional_max_pool3d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples); -int atg_fractional_max_pool3d_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices); -int atg_fractional_max_pool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices); -int atg_fractional_max_pool3d_out(tensor *, tensor 
output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples); -int atg_frobenius_norm(tensor *, tensor self); -int atg_frobenius_norm1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); -int atg_frobenius_norm_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim); -int atg_full(tensor *, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device); -int atg_full_like(tensor *, tensor self, scalar fill_value); -int atg_full_like1(tensor *, tensor self, scalar fill_value, int options_kind, int options_device); -int atg_full_out(tensor *, tensor out, int64_t *size_data, int size_len, scalar fill_value); -int atg_gather(tensor *, tensor self, int64_t dim, tensor index, int sparse_grad); -int atg_gather_out(tensor *, tensor out, tensor self, int64_t dim, tensor index, int sparse_grad); -int atg_ge(tensor *, tensor self, scalar other); -int atg_ge1(tensor *, tensor self, tensor other); -int atg_ge_(tensor *, tensor self, scalar other); -int atg_ge_1(tensor *, tensor self, tensor other); -int atg_ge_out(tensor *, tensor out, tensor self, scalar other); -int atg_ge_out1(tensor *, tensor out, tensor self, tensor other); -int atg_gelu(tensor *, tensor self); -int atg_gelu_backward(tensor *, tensor grad, tensor self); -int atg_geometric_(tensor *, tensor self, double p); -int atg_geqrf(tensor *, tensor self); -int atg_geqrf_out(tensor *, tensor a, tensor tau, tensor self); -int atg_ger(tensor *, tensor self, tensor vec2); -int atg_ger_out(tensor *, tensor out, tensor self, tensor vec2); -int atg_glu(tensor *, tensor self, int64_t dim); -int atg_glu_backward(tensor *, tensor grad_output, tensor self, int64_t dim); -int atg_glu_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t dim); -int atg_glu_out(tensor *, tensor out, tensor self, int64_t dim); -int atg_grad(tensor *, tensor self); -int atg_grid_sampler(tensor *, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); -int atg_grid_sampler_2d(tensor *, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); -int atg_grid_sampler_2d_backward(tensor *, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); -int atg_grid_sampler_3d(tensor *, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); -int atg_grid_sampler_3d_backward(tensor *, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); -int atg_group_norm(tensor *, tensor input, int64_t num_groups, tensor weight, tensor bias, double eps, int cudnn_enabled); -int atg_gru(tensor *, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first); -int atg_gru1(tensor *, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional); -int atg_gru_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh); -int atg_gt(tensor *, tensor self, scalar other); -int atg_gt1(tensor *, tensor self, tensor other); -int atg_gt_(tensor *, tensor self, scalar other); -int atg_gt_1(tensor *, tensor self, tensor other); -int atg_gt_out(tensor 
*, tensor out, tensor self, scalar other); -int atg_gt_out1(tensor *, tensor out, tensor self, tensor other); -int atg_hamming_window(tensor *, int64_t window_length, int options_kind, int options_device); -int atg_hamming_window1(tensor *, int64_t window_length, int periodic, int options_kind, int options_device); -int atg_hamming_window2(tensor *, int64_t window_length, int periodic, double alpha, int options_kind, int options_device); -int atg_hamming_window3(tensor *, int64_t window_length, int periodic, double alpha, double beta, int options_kind, int options_device); -int atg_hann_window(tensor *, int64_t window_length, int options_kind, int options_device); -int atg_hann_window1(tensor *, int64_t window_length, int periodic, int options_kind, int options_device); -int atg_hardshrink(tensor *, tensor self); -int atg_hardshrink_backward(tensor *, tensor grad_out, tensor self, scalar lambd); -int atg_hardtanh(tensor *, tensor self); -int atg_hardtanh_(tensor *, tensor self); -int atg_hardtanh_backward(tensor *, tensor grad_output, tensor self, scalar min_val, scalar max_val); -int atg_hardtanh_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar min_val, scalar max_val); -int atg_hardtanh_out(tensor *, tensor out, tensor self); -int atg_hinge_embedding_loss(tensor *, tensor self, tensor target, double margin, int64_t reduction); -int atg_histc(tensor *, tensor self, int64_t bins); -int atg_histc_out(tensor *, tensor out, tensor self, int64_t bins); -int atg_hspmm(tensor *, tensor mat1, tensor mat2); -int atg_hspmm_out(tensor *, tensor out, tensor mat1, tensor mat2); -int atg_ifft(tensor *, tensor self, int64_t signal_ndim, int normalized); -int atg_im2col(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); -int atg_im2col_backward(tensor *, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); -int atg_im2col_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); -int atg_im2col_out(tensor *, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); -int atg_imag(tensor *, tensor self); -int atg_imag_out(tensor *, tensor out, tensor self); -int atg_index(tensor *, tensor self, tensor *indices_data, int indices_len); -int atg_index_add(tensor *, tensor self, int64_t dim, tensor index, tensor source); -int atg_index_add_(tensor *, tensor self, int64_t dim, tensor index, tensor source); -int atg_index_copy(tensor *, tensor self, int64_t dim, tensor index, tensor source); -int atg_index_copy_(tensor *, tensor self, int64_t dim, tensor index, tensor source); -int atg_index_fill(tensor *, tensor self, int64_t dim, tensor index, scalar value); -int atg_index_fill1(tensor *, tensor self, int64_t dim, tensor index, tensor value); -int atg_index_fill_(tensor *, tensor self, int64_t dim, tensor index, scalar value); -int atg_index_fill_1(tensor *, tensor self, int64_t dim, tensor index, tensor value); 
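/* Every atg_* entry point in this header follows the same convention, as the
 * PROTECT bodies in the .cpp hunks above show: results are written through
 * the leading tensor* out-parameter and the int return value is a status
 * code (0 on success, 1 on failure). A minimal caller-side sketch of that
 * convention; the include layout and the origin of 'input' are assumptions
 * for illustration, not part of this patch:
 */
#include <stdio.h>
#include "torch_api.h"            /* assumed to provide the tensor typedef */
#include "torch_api_generated.h"  /* the atg_* declarations in this hunk */

static void abs_example(tensor input) {
    tensor out = NULL;
    if (atg_abs(&out, input) != 0) {
        /* non-zero status: a message is retrievable through the library's
         * error-query entry point */
        fprintf(stderr, "atg_abs failed\n");
        return;
    }
    /* out now points at a freshly allocated torch::Tensor holding the
     * element-wise absolute value of input */
}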
-int atg_index_put(tensor *, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate); -int atg_index_put_(tensor *, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate); -int atg_index_select(tensor *, tensor self, int64_t dim, tensor index); -int atg_index_select_out(tensor *, tensor out, tensor self, int64_t dim, tensor index); -int atg_indices(tensor *, tensor self); -int atg_instance_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int use_input_stats, double momentum, double eps, int cudnn_enabled); -int atg_int_repr(tensor *, tensor self); -int atg_inverse(tensor *, tensor self); -int atg_inverse_out(tensor *, tensor out, tensor self); -int atg_irfft(tensor *, tensor self, int64_t signal_ndim, int normalized, int onesided, int64_t *signal_sizes_data, int signal_sizes_len); -int atg_isclose(tensor *, tensor self, tensor other, double rtol, double atol, int equal_nan); -int atg_isfinite(tensor *, tensor self); -int atg_isnan(tensor *, tensor self); -int atg_kl_div(tensor *, tensor self, tensor target, int64_t reduction); -int atg_kl_div_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction); -int atg_kthvalue(tensor *, tensor self, int64_t k, int64_t dim, int keepdim); -int atg_kthvalue_out(tensor *, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int keepdim); -int atg_l1_loss(tensor *, tensor self, tensor target, int64_t reduction); -int atg_l1_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction); -int atg_l1_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction); -int atg_l1_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction); -int atg_layer_norm(tensor *, tensor input, int64_t *normalized_shape_data, int normalized_shape_len, tensor weight, tensor bias, double eps, int cudnn_enable); -int atg_le(tensor *, tensor self, scalar other); -int atg_le1(tensor *, tensor self, tensor other); -int atg_le_(tensor *, tensor self, scalar other); -int atg_le_1(tensor *, tensor self, tensor other); -int atg_le_out(tensor *, tensor out, tensor self, scalar other); -int atg_le_out1(tensor *, tensor out, tensor self, tensor other); -int atg_leaky_relu(tensor *, tensor self); -int atg_leaky_relu_(tensor *, tensor self); -int atg_leaky_relu_backward(tensor *, tensor grad_output, tensor self, scalar negative_slope); -int atg_leaky_relu_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar negative_slope); -int atg_leaky_relu_out(tensor *, tensor out, tensor self); -int atg_lerp(tensor *, tensor self, tensor end, scalar weight); -int atg_lerp1(tensor *, tensor self, tensor end, tensor weight); -int atg_lerp_(tensor *, tensor self, tensor end, scalar weight); -int atg_lerp_1(tensor *, tensor self, tensor end, tensor weight); -int atg_lerp_out(tensor *, tensor out, tensor self, tensor end, scalar weight); -int atg_lerp_out1(tensor *, tensor out, tensor self, tensor end, tensor weight); -int atg_lgamma(tensor *, tensor self); -int atg_lgamma_(tensor *, tensor self); -int atg_lgamma_out(tensor *, tensor out, tensor self); -int atg_linear(tensor *, tensor input, tensor weight, tensor bias); -int atg_linspace(tensor *, scalar start, scalar end, int64_t steps, int options_kind, int options_device); -int atg_linspace_out(tensor *, tensor out, scalar start, scalar end, int64_t steps); -int atg_log(tensor *, 
tensor self); -int atg_log10(tensor *, tensor self); -int atg_log10_(tensor *, tensor self); -int atg_log10_out(tensor *, tensor out, tensor self); -int atg_log1p(tensor *, tensor self); -int atg_log1p_(tensor *, tensor self); -int atg_log1p_out(tensor *, tensor out, tensor self); -int atg_log2(tensor *, tensor self); -int atg_log2_(tensor *, tensor self); -int atg_log2_out(tensor *, tensor out, tensor self); -int atg_log_(tensor *, tensor self); -int atg_log_normal_(tensor *, tensor self, double mean, double std); -int atg_log_out(tensor *, tensor out, tensor self); -int atg_log_sigmoid(tensor *, tensor self); -int atg_log_sigmoid_backward(tensor *, tensor grad_output, tensor self, tensor buffer); -int atg_log_sigmoid_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor buffer); -int atg_log_sigmoid_out(tensor *, tensor out, tensor self); -int atg_log_softmax(tensor *, tensor self, int64_t dim, int dtype); -int atg_logdet(tensor *, tensor self); -int atg_logical_not(tensor *, tensor self); -int atg_logical_not_(tensor *, tensor self); -int atg_logical_not_out(tensor *, tensor out, tensor self); -int atg_logical_xor(tensor *, tensor self, tensor other); -int atg_logical_xor_(tensor *, tensor self, tensor other); -int atg_logical_xor_out(tensor *, tensor out, tensor self, tensor other); -int atg_logspace(tensor *, scalar start, scalar end, int64_t steps, double base, int options_kind, int options_device); -int atg_logspace_out(tensor *, tensor out, scalar start, scalar end, int64_t steps, double base); -int atg_logsumexp(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); -int atg_logsumexp_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim); -int atg_lstm(tensor *, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first); -int atg_lstm1(tensor *, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional); -int atg_lstm_cell(tensor *, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh); -int atg_lstsq(tensor *, tensor self, tensor A); -int atg_lstsq_out(tensor *, tensor X, tensor qr, tensor self, tensor A); -int atg_lt(tensor *, tensor self, scalar other); -int atg_lt1(tensor *, tensor self, tensor other); -int atg_lt_(tensor *, tensor self, scalar other); -int atg_lt_1(tensor *, tensor self, tensor other); -int atg_lt_out(tensor *, tensor out, tensor self, scalar other); -int atg_lt_out1(tensor *, tensor out, tensor self, tensor other); -int atg_lu_solve(tensor *, tensor self, tensor LU_data, tensor LU_pivots); -int atg_lu_solve_out(tensor *, tensor out, tensor self, tensor LU_data, tensor LU_pivots); -int atg_margin_ranking_loss(tensor *, tensor input1, tensor input2, tensor target, double margin, int64_t reduction); -int atg_masked_fill(tensor *, tensor self, tensor mask, scalar value); -int atg_masked_fill1(tensor *, tensor self, tensor mask, tensor value); -int atg_masked_fill_(tensor *, tensor self, tensor mask, scalar value); -int atg_masked_fill_1(tensor *, tensor self, tensor mask, tensor value); -int atg_masked_scatter(tensor *, tensor self, tensor mask, tensor source); -int atg_masked_scatter_(tensor *, tensor self, tensor mask, tensor source); -int atg_masked_select(tensor *, tensor self, tensor mask); -int 
atg_masked_select_out(tensor *, tensor out, tensor self, tensor mask); -int atg_matmul(tensor *, tensor self, tensor other); -int atg_matmul_out(tensor *, tensor out, tensor self, tensor other); -int atg_matrix_power(tensor *, tensor self, int64_t n); -int atg_matrix_rank(tensor *, tensor self, int symmetric); -int atg_matrix_rank1(tensor *, tensor self, double tol, int symmetric); -int atg_max(tensor *, tensor self); -int atg_max1(tensor *, tensor self, tensor other); -int atg_max2(tensor *, tensor self, int64_t dim, int keepdim); -int atg_max_out(tensor *, tensor out, tensor self, tensor other); -int atg_max_out1(tensor *, tensor max, tensor max_values, tensor self, int64_t dim, int keepdim); -int atg_max_pool1d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); -int atg_max_pool1d_with_indices(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); -int atg_max_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); -int atg_max_pool2d_with_indices(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); -int atg_max_pool2d_with_indices_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices); -int atg_max_pool2d_with_indices_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices); -int atg_max_pool2d_with_indices_out(tensor *, tensor out, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); -int atg_max_pool3d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); -int atg_max_pool3d_with_indices(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); -int atg_max_pool3d_with_indices_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices); -int atg_max_pool3d_with_indices_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor 
indices); -int atg_max_pool3d_with_indices_out(tensor *, tensor out, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); -int atg_max_unpool2d(tensor *, tensor self, tensor indices, int64_t *output_size_data, int output_size_len); -int atg_max_unpool2d_backward(tensor *, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len); -int atg_max_unpool2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len); -int atg_max_unpool2d_out(tensor *, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len); -int atg_max_unpool3d(tensor *, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); -int atg_max_unpool3d_backward(tensor *, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); -int atg_max_unpool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); -int atg_max_unpool3d_out(tensor *, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); -int atg_max_values(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); -int atg_mean(tensor *, tensor self, int dtype); -int atg_mean1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); -int atg_mean_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); -int atg_median(tensor *, tensor self); -int atg_median1(tensor *, tensor self, int64_t dim, int keepdim); -int atg_median_out(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int keepdim); +C_API int atg_chunk(tensor *, tensor self, int64_t chunks, int64_t dim); +C_API int atg_clamp(tensor *, tensor self, scalar min, scalar max); +C_API int atg_clamp_(tensor *, tensor self, scalar min, scalar max); +C_API int atg_clamp_max(tensor *, tensor self, scalar max); +C_API int atg_clamp_max_(tensor *, tensor self, scalar max); +C_API int atg_clamp_max_out(tensor *, tensor out, tensor self, scalar max); +C_API int atg_clamp_min(tensor *, tensor self, scalar min); +C_API int atg_clamp_min_(tensor *, tensor self, scalar min); +C_API int atg_clamp_min_out(tensor *, tensor out, tensor self, scalar min); +C_API int atg_clamp_out(tensor *, tensor out, tensor self, scalar min, scalar max); +C_API int atg_clone(tensor *, tensor self); +C_API int atg_coalesce(tensor *, tensor self); +C_API int atg_col2im(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); +C_API int atg_col2im_backward(tensor *, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); +C_API int atg_col2im_backward_out(tensor *, tensor 
grad_input, tensor grad_output, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); +C_API int atg_col2im_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); +C_API int atg_combinations(tensor *, tensor self, int64_t r, int with_replacement); +C_API int atg_conj(tensor *, tensor self); +C_API int atg_conj_out(tensor *, tensor out, tensor self); +C_API int atg_constant_pad_nd(tensor *, tensor self, int64_t *pad_data, int pad_len); +C_API int atg_contiguous(tensor *, tensor self); +C_API int atg_conv1d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups); +C_API int atg_conv2d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups); +C_API int atg_conv3d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int64_t groups); +C_API int atg_conv_tbc(tensor *, tensor self, tensor weight, tensor bias, int64_t pad); +C_API int atg_conv_tbc_backward(tensor *, tensor self, tensor input, tensor weight, tensor bias, int64_t pad); +C_API int atg_conv_transpose1d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len); +C_API int atg_conv_transpose2d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len); +C_API int atg_conv_transpose3d(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t groups, int64_t *dilation_data, int dilation_len); +C_API int atg_convolution(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups); +C_API int atg_convolution_overrideable(tensor *, tensor input, tensor weight, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int transposed, int64_t *output_padding_data, int output_padding_len, int64_t groups); +C_API int atg_copy_sparse_to_sparse_(tensor *, tensor self, tensor src, int non_blocking); +C_API int atg_cos(tensor *, tensor self); +C_API int atg_cos_(tensor *, tensor self); +C_API int atg_cos_out(tensor *, tensor out, tensor self); +C_API int atg_cosh(tensor *, tensor self); +C_API int atg_cosh_(tensor *, tensor self); +C_API int atg_cosh_out(tensor *, tensor out, tensor self); +C_API int atg_cosine_embedding_loss(tensor *, tensor input1, tensor input2, tensor target, double margin, int64_t 
reduction); +C_API int atg_cosine_similarity(tensor *, tensor x1, tensor x2, int64_t dim, double eps); +C_API int atg_cross(tensor *, tensor self, tensor other, int64_t dim); +C_API int atg_cross_out(tensor *, tensor out, tensor self, tensor other, int64_t dim); +C_API int atg_ctc_loss(tensor *, tensor log_probs, tensor targets, int64_t *input_lengths_data, int input_lengths_len, int64_t *target_lengths_data, int target_lengths_len, int64_t blank, int64_t reduction, int zero_infinity); +C_API int atg_ctc_loss1(tensor *, tensor log_probs, tensor targets, tensor input_lengths, tensor target_lengths, int64_t blank, int64_t reduction, int zero_infinity); +C_API int atg_cudnn_affine_grid_generator(tensor *, tensor theta, int64_t n, int64_t C, int64_t H, int64_t W); +C_API int atg_cudnn_affine_grid_generator_backward(tensor *, tensor grad, int64_t n, int64_t C, int64_t H, int64_t W); +C_API int atg_cudnn_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon); +C_API int atg_cudnn_batch_norm_backward(tensor *, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon, tensor reserveSpace); +C_API int atg_cudnn_convolution(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +C_API int atg_cudnn_convolution_backward_bias(tensor *, tensor grad_output); +C_API int atg_cudnn_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +C_API int atg_cudnn_convolution_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +C_API int atg_cudnn_convolution_transpose(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +C_API int atg_cudnn_convolution_transpose_backward_bias(tensor *, tensor grad_output); +C_API int atg_cudnn_convolution_transpose_backward_input(tensor *, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +C_API int atg_cudnn_convolution_transpose_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +C_API int atg_cudnn_grid_sampler(tensor *, tensor self, tensor grid); +C_API int atg_cudnn_grid_sampler_backward(tensor *, tensor self, tensor grid, tensor grad_output); +C_API int atg_cumprod(tensor *, tensor self, int64_t dim, int dtype); +C_API int atg_cumprod_out(tensor *, tensor out, tensor self, 
int64_t dim, int dtype); +C_API int atg_cumsum(tensor *, tensor self, int64_t dim, int dtype); +C_API int atg_cumsum_out(tensor *, tensor out, tensor self, int64_t dim, int dtype); +C_API int atg_data(tensor *, tensor self); +C_API int atg_dequantize(tensor *, tensor self); +C_API int atg_det(tensor *, tensor self); +C_API int atg_detach(tensor *, tensor self); +C_API int atg_detach_(tensor *, tensor self); +C_API int atg_diag(tensor *, tensor self, int64_t diagonal); +C_API int atg_diag_embed(tensor *, tensor self, int64_t offset, int64_t dim1, int64_t dim2); +C_API int atg_diag_out(tensor *, tensor out, tensor self, int64_t diagonal); +C_API int atg_diagflat(tensor *, tensor self, int64_t offset); +C_API int atg_diagonal(tensor *, tensor self, int64_t offset, int64_t dim1, int64_t dim2); +C_API int atg_digamma(tensor *, tensor self); +C_API int atg_digamma_(tensor *, tensor self); +C_API int atg_digamma_out(tensor *, tensor out, tensor self); +C_API int atg_dist(tensor *, tensor self, tensor other); +C_API int atg_div(tensor *, tensor self, tensor other); +C_API int atg_div1(tensor *, tensor self, scalar other); +C_API int atg_div_(tensor *, tensor self, tensor other); +C_API int atg_div_1(tensor *, tensor self, scalar other); +C_API int atg_div_out(tensor *, tensor out, tensor self, tensor other); +C_API int atg_dot(tensor *, tensor self, tensor tensor); +C_API int atg_dot_out(tensor *, tensor out, tensor self, tensor tensor); +C_API int atg_dropout(tensor *, tensor input, double p, int train); +C_API int atg_dropout_(tensor *, tensor self, double p, int train); +C_API int atg_eig(tensor *, tensor self, int eigenvectors); +C_API int atg_eig_out(tensor *, tensor e, tensor v, tensor self, int eigenvectors); +C_API int atg_elu(tensor *, tensor self); +C_API int atg_elu_(tensor *, tensor self); +C_API int atg_elu_backward(tensor *, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, tensor output); +C_API int atg_elu_backward_out(tensor *, tensor grad_input, tensor grad_output, scalar alpha, scalar scale, scalar input_scale, tensor output); +C_API int atg_elu_out(tensor *, tensor out, tensor self); +C_API int atg_embedding(tensor *, tensor weight, tensor indices, int64_t padding_idx, int scale_grad_by_freq, int sparse); +C_API int atg_embedding_backward(tensor *, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq, int sparse); +C_API int atg_embedding_bag(tensor *, tensor weight, tensor indices, tensor offsets, int scale_grad_by_freq, int64_t mode, int sparse, tensor per_sample_weights); +C_API int atg_embedding_dense_backward(tensor *, tensor grad_output, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq); +C_API int atg_embedding_renorm_(tensor *, tensor self, tensor indices, double max_norm, double norm_type); +C_API int atg_embedding_sparse_backward(tensor *, tensor grad, tensor indices, int64_t num_weights, int64_t padding_idx, int scale_grad_by_freq); +C_API int atg_empty(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device); +C_API int atg_empty_like(tensor *, tensor self); +C_API int atg_empty_like1(tensor *, tensor self, int options_kind, int options_device); +C_API int atg_empty_out(tensor *, tensor out, int64_t *size_data, int size_len); +C_API int atg_empty_strided(tensor *, int64_t *size_data, int size_len, int64_t *stride_data, int stride_len, int options_kind, int options_device); +C_API int atg_eq(tensor *, tensor self, scalar other); +C_API int 
atg_eq1(tensor *, tensor self, tensor other); +C_API int atg_eq_(tensor *, tensor self, scalar other); +C_API int atg_eq_1(tensor *, tensor self, tensor other); +C_API int atg_eq_out(tensor *, tensor out, tensor self, scalar other); +C_API int atg_eq_out1(tensor *, tensor out, tensor self, tensor other); +C_API int atg_erf(tensor *, tensor self); +C_API int atg_erf_(tensor *, tensor self); +C_API int atg_erf_out(tensor *, tensor out, tensor self); +C_API int atg_erfc(tensor *, tensor self); +C_API int atg_erfc_(tensor *, tensor self); +C_API int atg_erfc_out(tensor *, tensor out, tensor self); +C_API int atg_erfinv(tensor *, tensor self); +C_API int atg_erfinv_(tensor *, tensor self); +C_API int atg_erfinv_out(tensor *, tensor out, tensor self); +C_API int atg_exp(tensor *, tensor self); +C_API int atg_exp_(tensor *, tensor self); +C_API int atg_exp_out(tensor *, tensor out, tensor self); +C_API int atg_expand(tensor *, tensor self, int64_t *size_data, int size_len, int implicit); +C_API int atg_expand_as(tensor *, tensor self, tensor other); +C_API int atg_expm1(tensor *, tensor self); +C_API int atg_expm1_(tensor *, tensor self); +C_API int atg_expm1_out(tensor *, tensor out, tensor self); +C_API int atg_exponential_(tensor *, tensor self, double lambd); +C_API int atg_eye(tensor *, int64_t n, int options_kind, int options_device); +C_API int atg_eye1(tensor *, int64_t n, int64_t m, int options_kind, int options_device); +C_API int atg_eye_out(tensor *, tensor out, int64_t n); +C_API int atg_eye_out1(tensor *, tensor out, int64_t n, int64_t m); +C_API int atg_fake_quantize_per_channel_affine(tensor *, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); +C_API int atg_fake_quantize_per_channel_affine_backward(tensor *, tensor grad, tensor self, tensor scale, tensor zero_point, int64_t axis, int64_t quant_min, int64_t quant_max); +C_API int atg_fake_quantize_per_tensor_affine(tensor *, tensor self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max); +C_API int atg_fake_quantize_per_tensor_affine_backward(tensor *, tensor grad, tensor self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max); +C_API int atg_fbgemm_linear_fp16_weight(tensor *, tensor input, tensor packed_weight, tensor bias); +C_API int atg_fbgemm_linear_fp16_weight_fp32_activation(tensor *, tensor input, tensor packed_weight, tensor bias); +C_API int atg_fbgemm_linear_int8_weight(tensor *, tensor input, tensor weight, tensor packed, tensor col_offsets, scalar weight_scale, scalar weight_zero_point, tensor bias); +C_API int atg_fbgemm_linear_int8_weight_fp32_activation(tensor *, tensor input, tensor weight, tensor packed, tensor col_offsets, scalar weight_scale, scalar weight_zero_point, tensor bias); +C_API int atg_fbgemm_pack_gemm_matrix_fp16(tensor *, tensor input); +C_API int atg_fbgemm_pack_quantized_matrix(tensor *, tensor input); +C_API int atg_fbgemm_pack_quantized_matrix1(tensor *, tensor input, int64_t K, int64_t n); +C_API int atg_feature_alpha_dropout(tensor *, tensor input, double p, int train); +C_API int atg_feature_alpha_dropout_(tensor *, tensor self, double p, int train); +C_API int atg_feature_dropout(tensor *, tensor input, double p, int train); +C_API int atg_feature_dropout_(tensor *, tensor self, double p, int train); +C_API int atg_fft(tensor *, tensor self, int64_t signal_ndim, int normalized); +C_API int atg_fill_(tensor *, tensor self, scalar value); +C_API int atg_fill_1(tensor *, tensor self, tensor value); 
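
Note on the C_API decorator applied throughout this hunk: its definition is not part of this excerpt of the patch, so the following is only a conventional sketch of what such a DLL export macro usually looks like, not necessarily the definition this patch uses. A build that consumes the DLL rather than producing it would normally switch to __declspec(dllimport) behind a separate configuration macro; that refinement is omitted here.

/* Hypothetical definition of the export decorator (an assumption, not
 * taken from this patch): exports the symbol from the DLL on Windows
 * and gives it default visibility under other toolchains. */
#ifdef _WIN32
#  define C_API __declspec(dllexport)
#else
#  define C_API __attribute__((visibility("default")))
#endif
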
+C_API int atg_fill_diagonal_(tensor *, tensor self, scalar fill_value, int wrap); +C_API int atg_flatten(tensor *, tensor self, int64_t start_dim, int64_t end_dim); +C_API int atg_flip(tensor *, tensor self, int64_t *dims_data, int dims_len); +C_API int atg_floor(tensor *, tensor self); +C_API int atg_floor_(tensor *, tensor self); +C_API int atg_floor_out(tensor *, tensor out, tensor self); +C_API int atg_fmod(tensor *, tensor self, scalar other); +C_API int atg_fmod1(tensor *, tensor self, tensor other); +C_API int atg_fmod_(tensor *, tensor self, scalar other); +C_API int atg_fmod_1(tensor *, tensor self, tensor other); +C_API int atg_fmod_out(tensor *, tensor out, tensor self, scalar other); +C_API int atg_fmod_out1(tensor *, tensor out, tensor self, tensor other); +C_API int atg_frac(tensor *, tensor self); +C_API int atg_frac_(tensor *, tensor self); +C_API int atg_frac_out(tensor *, tensor out, tensor self); +C_API int atg_fractional_max_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples); +C_API int atg_fractional_max_pool2d_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices); +C_API int atg_fractional_max_pool2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices); +C_API int atg_fractional_max_pool2d_out(tensor *, tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples); +C_API int atg_fractional_max_pool3d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples); +C_API int atg_fractional_max_pool3d_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices); +C_API int atg_fractional_max_pool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor indices); +C_API int atg_fractional_max_pool3d_out(tensor *, tensor output, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *output_size_data, int output_size_len, tensor random_samples); +C_API int atg_frobenius_norm(tensor *, tensor self); +C_API int atg_frobenius_norm1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); +C_API int atg_frobenius_norm_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim); +C_API int atg_full(tensor *, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device); +C_API int atg_full_like(tensor *, tensor self, scalar fill_value); +C_API int atg_full_like1(tensor *, tensor self, scalar fill_value, int options_kind, int options_device); +C_API int atg_full_out(tensor *, tensor out, int64_t *size_data, int size_len, scalar fill_value); +C_API int atg_gather(tensor *, tensor self, int64_t dim, tensor index, int sparse_grad); +C_API int atg_gather_out(tensor *, tensor out, tensor self, int64_t dim, tensor index, int sparse_grad); +C_API int atg_ge(tensor *, tensor self, scalar other); +C_API int atg_ge1(tensor *, tensor self, tensor other); 
+C_API int atg_ge_(tensor *, tensor self, scalar other); +C_API int atg_ge_1(tensor *, tensor self, tensor other); +C_API int atg_ge_out(tensor *, tensor out, tensor self, scalar other); +C_API int atg_ge_out1(tensor *, tensor out, tensor self, tensor other); +C_API int atg_gelu(tensor *, tensor self); +C_API int atg_gelu_backward(tensor *, tensor grad, tensor self); +C_API int atg_geometric_(tensor *, tensor self, double p); +C_API int atg_geqrf(tensor *, tensor self); +C_API int atg_geqrf_out(tensor *, tensor a, tensor tau, tensor self); +C_API int atg_ger(tensor *, tensor self, tensor vec2); +C_API int atg_ger_out(tensor *, tensor out, tensor self, tensor vec2); +C_API int atg_glu(tensor *, tensor self, int64_t dim); +C_API int atg_glu_backward(tensor *, tensor grad_output, tensor self, int64_t dim); +C_API int atg_glu_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t dim); +C_API int atg_glu_out(tensor *, tensor out, tensor self, int64_t dim); +C_API int atg_grad(tensor *, tensor self); +C_API int atg_grid_sampler(tensor *, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); +C_API int atg_grid_sampler_2d(tensor *, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); +C_API int atg_grid_sampler_2d_backward(tensor *, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); +C_API int atg_grid_sampler_3d(tensor *, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); +C_API int atg_grid_sampler_3d_backward(tensor *, tensor grad_output, tensor input, tensor grid, int64_t interpolation_mode, int64_t padding_mode, int align_corners); +C_API int atg_group_norm(tensor *, tensor input, int64_t num_groups, tensor weight, tensor bias, double eps, int cudnn_enabled); +C_API int atg_gru(tensor *, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first); +C_API int atg_gru1(tensor *, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional); +C_API int atg_gru_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh); +C_API int atg_gt(tensor *, tensor self, scalar other); +C_API int atg_gt1(tensor *, tensor self, tensor other); +C_API int atg_gt_(tensor *, tensor self, scalar other); +C_API int atg_gt_1(tensor *, tensor self, tensor other); +C_API int atg_gt_out(tensor *, tensor out, tensor self, scalar other); +C_API int atg_gt_out1(tensor *, tensor out, tensor self, tensor other); +C_API int atg_hamming_window(tensor *, int64_t window_length, int options_kind, int options_device); +C_API int atg_hamming_window1(tensor *, int64_t window_length, int periodic, int options_kind, int options_device); +C_API int atg_hamming_window2(tensor *, int64_t window_length, int periodic, double alpha, int options_kind, int options_device); +C_API int atg_hamming_window3(tensor *, int64_t window_length, int periodic, double alpha, double beta, int options_kind, int options_device); +C_API int atg_hann_window(tensor *, int64_t window_length, int options_kind, int options_device); +C_API int atg_hann_window1(tensor *, int64_t window_length, int periodic, int options_kind, int options_device); +C_API int atg_hardshrink(tensor *, tensor self); +C_API int 
atg_hardshrink_backward(tensor *, tensor grad_out, tensor self, scalar lambd); +C_API int atg_hardtanh(tensor *, tensor self); +C_API int atg_hardtanh_(tensor *, tensor self); +C_API int atg_hardtanh_backward(tensor *, tensor grad_output, tensor self, scalar min_val, scalar max_val); +C_API int atg_hardtanh_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar min_val, scalar max_val); +C_API int atg_hardtanh_out(tensor *, tensor out, tensor self); +C_API int atg_hinge_embedding_loss(tensor *, tensor self, tensor target, double margin, int64_t reduction); +C_API int atg_histc(tensor *, tensor self, int64_t bins); +C_API int atg_histc_out(tensor *, tensor out, tensor self, int64_t bins); +C_API int atg_hspmm(tensor *, tensor mat1, tensor mat2); +C_API int atg_hspmm_out(tensor *, tensor out, tensor mat1, tensor mat2); +C_API int atg_ifft(tensor *, tensor self, int64_t signal_ndim, int normalized); +C_API int atg_im2col(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); +C_API int atg_im2col_backward(tensor *, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); +C_API int atg_im2col_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *input_size_data, int input_size_len, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); +C_API int atg_im2col_out(tensor *, tensor out, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *dilation_data, int dilation_len, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len); +C_API int atg_imag(tensor *, tensor self); +C_API int atg_imag_out(tensor *, tensor out, tensor self); +C_API int atg_index(tensor *, tensor self, tensor *indices_data, int indices_len); +C_API int atg_index_add(tensor *, tensor self, int64_t dim, tensor index, tensor source); +C_API int atg_index_add_(tensor *, tensor self, int64_t dim, tensor index, tensor source); +C_API int atg_index_copy(tensor *, tensor self, int64_t dim, tensor index, tensor source); +C_API int atg_index_copy_(tensor *, tensor self, int64_t dim, tensor index, tensor source); +C_API int atg_index_fill(tensor *, tensor self, int64_t dim, tensor index, scalar value); +C_API int atg_index_fill1(tensor *, tensor self, int64_t dim, tensor index, tensor value); +C_API int atg_index_fill_(tensor *, tensor self, int64_t dim, tensor index, scalar value); +C_API int atg_index_fill_1(tensor *, tensor self, int64_t dim, tensor index, tensor value); +C_API int atg_index_put(tensor *, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate); +C_API int atg_index_put_(tensor *, tensor self, tensor *indices_data, int indices_len, tensor values, int accumulate); +C_API int atg_index_select(tensor *, tensor self, int64_t dim, tensor index); +C_API int atg_index_select_out(tensor *, tensor out, tensor self, int64_t dim, tensor index); +C_API int atg_indices(tensor *, tensor self); +C_API int atg_instance_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int use_input_stats, double momentum, double eps, int cudnn_enabled); +C_API int 
atg_int_repr(tensor *, tensor self); +C_API int atg_inverse(tensor *, tensor self); +C_API int atg_inverse_out(tensor *, tensor out, tensor self); +C_API int atg_irfft(tensor *, tensor self, int64_t signal_ndim, int normalized, int onesided, int64_t *signal_sizes_data, int signal_sizes_len); +C_API int atg_isclose(tensor *, tensor self, tensor other, double rtol, double atol, int equal_nan); +C_API int atg_isfinite(tensor *, tensor self); +C_API int atg_isnan(tensor *, tensor self); +C_API int atg_kl_div(tensor *, tensor self, tensor target, int64_t reduction); +C_API int atg_kl_div_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction); +C_API int atg_kthvalue(tensor *, tensor self, int64_t k, int64_t dim, int keepdim); +C_API int atg_kthvalue_out(tensor *, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int keepdim); +C_API int atg_l1_loss(tensor *, tensor self, tensor target, int64_t reduction); +C_API int atg_l1_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction); +C_API int atg_l1_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction); +C_API int atg_l1_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction); +C_API int atg_layer_norm(tensor *, tensor input, int64_t *normalized_shape_data, int normalized_shape_len, tensor weight, tensor bias, double eps, int cudnn_enable); +C_API int atg_le(tensor *, tensor self, scalar other); +C_API int atg_le1(tensor *, tensor self, tensor other); +C_API int atg_le_(tensor *, tensor self, scalar other); +C_API int atg_le_1(tensor *, tensor self, tensor other); +C_API int atg_le_out(tensor *, tensor out, tensor self, scalar other); +C_API int atg_le_out1(tensor *, tensor out, tensor self, tensor other); +C_API int atg_leaky_relu(tensor *, tensor self); +C_API int atg_leaky_relu_(tensor *, tensor self); +C_API int atg_leaky_relu_backward(tensor *, tensor grad_output, tensor self, scalar negative_slope); +C_API int atg_leaky_relu_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar negative_slope); +C_API int atg_leaky_relu_out(tensor *, tensor out, tensor self); +C_API int atg_lerp(tensor *, tensor self, tensor end, scalar weight); +C_API int atg_lerp1(tensor *, tensor self, tensor end, tensor weight); +C_API int atg_lerp_(tensor *, tensor self, tensor end, scalar weight); +C_API int atg_lerp_1(tensor *, tensor self, tensor end, tensor weight); +C_API int atg_lerp_out(tensor *, tensor out, tensor self, tensor end, scalar weight); +C_API int atg_lerp_out1(tensor *, tensor out, tensor self, tensor end, tensor weight); +C_API int atg_lgamma(tensor *, tensor self); +C_API int atg_lgamma_(tensor *, tensor self); +C_API int atg_lgamma_out(tensor *, tensor out, tensor self); +C_API int atg_linear(tensor *, tensor input, tensor weight, tensor bias); +C_API int atg_linspace(tensor *, scalar start, scalar end, int64_t steps, int options_kind, int options_device); +C_API int atg_linspace_out(tensor *, tensor out, scalar start, scalar end, int64_t steps); +C_API int atg_log(tensor *, tensor self); +C_API int atg_log10(tensor *, tensor self); +C_API int atg_log10_(tensor *, tensor self); +C_API int atg_log10_out(tensor *, tensor out, tensor self); +C_API int atg_log1p(tensor *, tensor self); +C_API int atg_log1p_(tensor *, tensor self); +C_API int atg_log1p_out(tensor *, tensor out, tensor self); +C_API int atg_log2(tensor *, tensor self); +C_API int atg_log2_(tensor 
*, tensor self); +C_API int atg_log2_out(tensor *, tensor out, tensor self); +C_API int atg_log_(tensor *, tensor self); +C_API int atg_log_normal_(tensor *, tensor self, double mean, double std); +C_API int atg_log_out(tensor *, tensor out, tensor self); +C_API int atg_log_sigmoid(tensor *, tensor self); +C_API int atg_log_sigmoid_backward(tensor *, tensor grad_output, tensor self, tensor buffer); +C_API int atg_log_sigmoid_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor buffer); +C_API int atg_log_sigmoid_out(tensor *, tensor out, tensor self); +C_API int atg_log_softmax(tensor *, tensor self, int64_t dim, int dtype); +C_API int atg_logdet(tensor *, tensor self); +C_API int atg_logical_not(tensor *, tensor self); +C_API int atg_logical_not_(tensor *, tensor self); +C_API int atg_logical_not_out(tensor *, tensor out, tensor self); +C_API int atg_logical_xor(tensor *, tensor self, tensor other); +C_API int atg_logical_xor_(tensor *, tensor self, tensor other); +C_API int atg_logical_xor_out(tensor *, tensor out, tensor self, tensor other); +C_API int atg_logspace(tensor *, scalar start, scalar end, int64_t steps, double base, int options_kind, int options_device); +C_API int atg_logspace_out(tensor *, tensor out, scalar start, scalar end, int64_t steps, double base); +C_API int atg_logsumexp(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); +C_API int atg_logsumexp_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim); +C_API int atg_lstm(tensor *, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first); +C_API int atg_lstm1(tensor *, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional); +C_API int atg_lstm_cell(tensor *, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh); +C_API int atg_lstsq(tensor *, tensor self, tensor A); +C_API int atg_lstsq_out(tensor *, tensor X, tensor qr, tensor self, tensor A); +C_API int atg_lt(tensor *, tensor self, scalar other); +C_API int atg_lt1(tensor *, tensor self, tensor other); +C_API int atg_lt_(tensor *, tensor self, scalar other); +C_API int atg_lt_1(tensor *, tensor self, tensor other); +C_API int atg_lt_out(tensor *, tensor out, tensor self, scalar other); +C_API int atg_lt_out1(tensor *, tensor out, tensor self, tensor other); +C_API int atg_lu_solve(tensor *, tensor self, tensor LU_data, tensor LU_pivots); +C_API int atg_lu_solve_out(tensor *, tensor out, tensor self, tensor LU_data, tensor LU_pivots); +C_API int atg_margin_ranking_loss(tensor *, tensor input1, tensor input2, tensor target, double margin, int64_t reduction); +C_API int atg_masked_fill(tensor *, tensor self, tensor mask, scalar value); +C_API int atg_masked_fill1(tensor *, tensor self, tensor mask, tensor value); +C_API int atg_masked_fill_(tensor *, tensor self, tensor mask, scalar value); +C_API int atg_masked_fill_1(tensor *, tensor self, tensor mask, tensor value); +C_API int atg_masked_scatter(tensor *, tensor self, tensor mask, tensor source); +C_API int atg_masked_scatter_(tensor *, tensor self, tensor mask, tensor source); +C_API int atg_masked_select(tensor *, tensor self, tensor mask); +C_API int atg_masked_select_out(tensor *, tensor out, tensor self, tensor mask); +C_API int atg_matmul(tensor *, 
tensor self, tensor other); +C_API int atg_matmul_out(tensor *, tensor out, tensor self, tensor other); +C_API int atg_matrix_power(tensor *, tensor self, int64_t n); +C_API int atg_matrix_rank(tensor *, tensor self, int symmetric); +C_API int atg_matrix_rank1(tensor *, tensor self, double tol, int symmetric); +C_API int atg_max(tensor *, tensor self); +C_API int atg_max1(tensor *, tensor self, tensor other); +C_API int atg_max2(tensor *, tensor self, int64_t dim, int keepdim); +C_API int atg_max_out(tensor *, tensor out, tensor self, tensor other); +C_API int atg_max_out1(tensor *, tensor max, tensor max_values, tensor self, int64_t dim, int keepdim); +C_API int atg_max_pool1d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); +C_API int atg_max_pool1d_with_indices(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); +C_API int atg_max_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); +C_API int atg_max_pool2d_with_indices(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); +C_API int atg_max_pool2d_with_indices_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices); +C_API int atg_max_pool2d_with_indices_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices); +C_API int atg_max_pool2d_with_indices_out(tensor *, tensor out, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); +C_API int atg_max_pool3d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); +C_API int atg_max_pool3d_with_indices(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); +C_API int atg_max_pool3d_with_indices_backward(tensor *, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode, tensor indices); +C_API int atg_max_pool3d_with_indices_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, 
int ceil_mode, tensor indices); +C_API int atg_max_pool3d_with_indices_out(tensor *, tensor out, tensor indices, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); +C_API int atg_max_unpool2d(tensor *, tensor self, tensor indices, int64_t *output_size_data, int output_size_len); +C_API int atg_max_unpool2d_backward(tensor *, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len); +C_API int atg_max_unpool2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len); +C_API int atg_max_unpool2d_out(tensor *, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len); +C_API int atg_max_unpool3d(tensor *, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); +C_API int atg_max_unpool3d_backward(tensor *, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); +C_API int atg_max_unpool3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); +C_API int atg_max_unpool3d_out(tensor *, tensor out, tensor self, tensor indices, int64_t *output_size_data, int output_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len); +C_API int atg_max_values(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); +C_API int atg_mean(tensor *, tensor self, int dtype); +C_API int atg_mean1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); +C_API int atg_mean_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype); +C_API int atg_median(tensor *, tensor self); +C_API int atg_median1(tensor *, tensor self, int64_t dim, int keepdim); +C_API int atg_median_out(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int keepdim); // tensor *atg_meshgrid(tensor *tensors_data, int tensors_len); -int atg_meshgrid(tensor *, tensor *tensors_data, int tensors_len); -int atg_min(tensor *, tensor self); -int atg_min1(tensor *, tensor self, tensor other); -int atg_min2(tensor *, tensor self, int64_t dim, int keepdim); -int atg_min_out(tensor *, tensor out, tensor self, tensor other); -int atg_min_out1(tensor *, tensor min, tensor min_indices, tensor self, int64_t dim, int keepdim); -int atg_min_values(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); -int atg_miopen_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon); -int atg_miopen_batch_norm_backward(tensor *, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon); -int atg_miopen_convolution(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -int atg_miopen_convolution_backward_bias(tensor *, 
tensor grad_output); -int atg_miopen_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -int atg_miopen_convolution_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -int atg_miopen_convolution_transpose(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -int atg_miopen_convolution_transpose_backward_input(tensor *, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -int atg_miopen_convolution_transpose_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -int atg_miopen_depthwise_convolution(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -int atg_miopen_depthwise_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -int atg_miopen_depthwise_convolution_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); -int atg_miopen_rnn(tensor *, tensor input, tensor *weight_data, int weight_len, int64_t weight_stride0, tensor hx, tensor cx, int64_t mode, int64_t hidden_size, int64_t num_layers, int batch_first, double dropout, int train, int bidirectional, int64_t *batch_sizes_data, int batch_sizes_len, tensor dropout_state); -int atg_mkldnn_adaptive_avg_pool2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); -int atg_mkldnn_convolution(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups); -int atg_mkldnn_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined); -int atg_mkldnn_convolution_backward_weights(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int 
stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined); -int atg_mkldnn_linear(tensor *, tensor input, tensor weight, tensor bias); -int atg_mkldnn_max_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); -int atg_mkldnn_reorder_conv2d_weight(tensor *, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups); -int atg_mm(tensor *, tensor self, tensor mat2); -int atg_mm_out(tensor *, tensor out, tensor self, tensor mat2); -int atg_mode(tensor *, tensor self, int64_t dim, int keepdim); -int atg_mode_out(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int keepdim); -int atg_mse_loss(tensor *, tensor self, tensor target, int64_t reduction); -int atg_mse_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction); -int atg_mse_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction); -int atg_mse_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction); -int atg_mul(tensor *, tensor self, tensor other); -int atg_mul1(tensor *, tensor self, scalar other); -int atg_mul_(tensor *, tensor self, tensor other); -int atg_mul_1(tensor *, tensor self, scalar other); -int atg_mul_out(tensor *, tensor out, tensor self, tensor other); -int atg_multi_margin_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction); -int atg_multi_margin_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction); -int atg_multilabel_margin_loss(tensor *, tensor self, tensor target, int64_t reduction); -int atg_multilabel_margin_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target); -int atg_multilabel_margin_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target); -int atg_multilabel_margin_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction); -int atg_multinomial(tensor *, tensor self, int64_t num_samples, int replacement); -int atg_multinomial_out(tensor *, tensor out, tensor self, int64_t num_samples, int replacement); -int atg_mv(tensor *, tensor self, tensor vec); -int atg_mv_out(tensor *, tensor out, tensor self, tensor vec); -int atg_mvlgamma(tensor *, tensor self, int64_t p); -int atg_mvlgamma_(tensor *, tensor self, int64_t p); -int atg_narrow(tensor *, tensor self, int64_t dim, int64_t start, int64_t length); -int atg_narrow_copy(tensor *, tensor self, int64_t dim, int64_t start, int64_t length); -int atg_native_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps); -int atg_native_layer_norm(tensor *, tensor input, tensor weight, tensor bias, int64_t M, int64_t n, double eps); -int atg_native_norm(tensor *, tensor self); -int atg_ne(tensor *, tensor self, scalar other); -int atg_ne1(tensor *, tensor self, tensor other); -int atg_ne_(tensor *, tensor self, scalar other); -int atg_ne_1(tensor *, tensor self, tensor other); -int atg_ne_out(tensor *, tensor out, tensor self, scalar other); -int 
atg_ne_out1(tensor *, tensor out, tensor self, tensor other); -int atg_neg(tensor *, tensor self); -int atg_neg_(tensor *, tensor self); -int atg_neg_out(tensor *, tensor out, tensor self); -int atg_new_empty(tensor *, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device); -int atg_new_full(tensor *, tensor self, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device); -int atg_new_zeros(tensor *, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device); -int atg_nll_loss(tensor *, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index); -int atg_nll_loss2d(tensor *, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index); -int atg_nll_loss2d_backward(tensor *, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight); -int atg_nll_loss2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight); -int atg_nll_loss2d_out(tensor *, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index); -int atg_nll_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight); -int atg_nll_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight); -int atg_nll_loss_out(tensor *, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index); -int atg_nonzero(tensor *, tensor self); +C_API int atg_meshgrid(tensor *, tensor *tensors_data, int tensors_len); +C_API int atg_min(tensor *, tensor self); +C_API int atg_min1(tensor *, tensor self, tensor other); +C_API int atg_min2(tensor *, tensor self, int64_t dim, int keepdim); +C_API int atg_min_out(tensor *, tensor out, tensor self, tensor other); +C_API int atg_min_out1(tensor *, tensor min, tensor min_indices, tensor self, int64_t dim, int keepdim); +C_API int atg_min_values(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim); +C_API int atg_miopen_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double exponential_average_factor, double epsilon); +C_API int atg_miopen_batch_norm_backward(tensor *, tensor input, tensor grad_output, tensor weight, tensor running_mean, tensor running_var, tensor save_mean, tensor save_var, double epsilon); +C_API int atg_miopen_convolution(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +C_API int atg_miopen_convolution_backward_bias(tensor *, tensor grad_output); +C_API int atg_miopen_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +C_API int atg_miopen_convolution_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, 
int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +C_API int atg_miopen_convolution_transpose(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +C_API int atg_miopen_convolution_transpose_backward_input(tensor *, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +C_API int atg_miopen_convolution_transpose_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +C_API int atg_miopen_depthwise_convolution(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +C_API int atg_miopen_depthwise_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +C_API int atg_miopen_depthwise_convolution_backward_weight(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int benchmark, int deterministic); +C_API int atg_miopen_rnn(tensor *, tensor input, tensor *weight_data, int weight_len, int64_t weight_stride0, tensor hx, tensor cx, int64_t mode, int64_t hidden_size, int64_t num_layers, int batch_first, double dropout, int train, int bidirectional, int64_t *batch_sizes_data, int batch_sizes_len, tensor dropout_state); +C_API int atg_mkldnn_adaptive_avg_pool2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_mkldnn_convolution(tensor *, tensor self, tensor weight, tensor bias, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups); +C_API int atg_mkldnn_convolution_backward_input(tensor *, int64_t *self_size_data, int self_size_len, tensor grad_output, tensor weight, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined); +C_API int atg_mkldnn_convolution_backward_weights(tensor *, int64_t *weight_size_data, int weight_size_len, tensor grad_output, tensor self, int64_t *padding_data, int padding_len, int64_t *stride_data, int stride_len, int64_t *dilation_data, int dilation_len, int64_t groups, int bias_defined); +C_API int atg_mkldnn_linear(tensor *, tensor input, tensor weight, tensor bias); +C_API int atg_mkldnn_max_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode); +C_API int atg_mkldnn_reorder_conv2d_weight(tensor *, tensor self, 
+C_API int atg_mm(tensor *, tensor self, tensor mat2);
+C_API int atg_mm_out(tensor *, tensor out, tensor self, tensor mat2);
+C_API int atg_mode(tensor *, tensor self, int64_t dim, int keepdim);
+C_API int atg_mode_out(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int keepdim);
+C_API int atg_mse_loss(tensor *, tensor self, tensor target, int64_t reduction);
+C_API int atg_mse_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction);
+C_API int atg_mse_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction);
+C_API int atg_mse_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction);
+C_API int atg_mul(tensor *, tensor self, tensor other);
+C_API int atg_mul1(tensor *, tensor self, scalar other);
+C_API int atg_mul_(tensor *, tensor self, tensor other);
+C_API int atg_mul_1(tensor *, tensor self, scalar other);
+C_API int atg_mul_out(tensor *, tensor out, tensor self, tensor other);
+C_API int atg_multi_margin_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction);
+C_API int atg_multi_margin_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, scalar p, scalar margin, tensor weight, int64_t reduction);
+C_API int atg_multilabel_margin_loss(tensor *, tensor self, tensor target, int64_t reduction);
+C_API int atg_multilabel_margin_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target);
+C_API int atg_multilabel_margin_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction, tensor is_target);
+C_API int atg_multilabel_margin_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction);
+C_API int atg_multinomial(tensor *, tensor self, int64_t num_samples, int replacement);
+C_API int atg_multinomial_out(tensor *, tensor out, tensor self, int64_t num_samples, int replacement);
+C_API int atg_mv(tensor *, tensor self, tensor vec);
+C_API int atg_mv_out(tensor *, tensor out, tensor self, tensor vec);
+C_API int atg_mvlgamma(tensor *, tensor self, int64_t p);
+C_API int atg_mvlgamma_(tensor *, tensor self, int64_t p);
+C_API int atg_narrow(tensor *, tensor self, int64_t dim, int64_t start, int64_t length);
+C_API int atg_narrow_copy(tensor *, tensor self, int64_t dim, int64_t start, int64_t length);
+C_API int atg_native_batch_norm(tensor *, tensor input, tensor weight, tensor bias, tensor running_mean, tensor running_var, int training, double momentum, double eps);
+C_API int atg_native_layer_norm(tensor *, tensor input, tensor weight, tensor bias, int64_t M, int64_t n, double eps);
+C_API int atg_native_norm(tensor *, tensor self);
+C_API int atg_ne(tensor *, tensor self, scalar other);
+C_API int atg_ne1(tensor *, tensor self, tensor other);
+C_API int atg_ne_(tensor *, tensor self, scalar other);
+C_API int atg_ne_1(tensor *, tensor self, tensor other);
+C_API int atg_ne_out(tensor *, tensor out, tensor self, scalar other);
+C_API int atg_ne_out1(tensor *, tensor out, tensor self, tensor other);
+C_API int atg_neg(tensor *, tensor self);
+C_API int atg_neg_(tensor *, tensor self);
+C_API int atg_neg_out(tensor *, tensor out, tensor self);
+C_API int atg_new_empty(tensor *, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device);
+C_API int atg_new_full(tensor *, tensor self, int64_t *size_data, int size_len, scalar fill_value, int options_kind, int options_device);
+C_API int atg_new_zeros(tensor *, tensor self, int64_t *size_data, int size_len, int options_kind, int options_device);
+C_API int atg_nll_loss(tensor *, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index);
+C_API int atg_nll_loss2d(tensor *, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index);
+C_API int atg_nll_loss2d_backward(tensor *, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight);
+C_API int atg_nll_loss2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight);
+C_API int atg_nll_loss2d_out(tensor *, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index);
+C_API int atg_nll_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight);
+C_API int atg_nll_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index, tensor total_weight);
+C_API int atg_nll_loss_out(tensor *, tensor out, tensor self, tensor target, tensor weight, int64_t reduction, int64_t ignore_index);
+C_API int atg_nonzero(tensor *, tensor self);
// tensor *atg_nonzero_numpy(tensor self);
-int atg_nonzero_numpy(tensor *, tensor self);
-int atg_nonzero_out(tensor *, tensor out, tensor self);
-int atg_norm(tensor *, tensor self);
-int atg_norm1(tensor *, tensor self, scalar p, int dtype);
-int atg_norm2(tensor *, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim);
-int atg_norm3(tensor *, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype);
-int atg_norm_except_dim(tensor *, tensor v, int64_t pow, int64_t dim);
-int atg_norm_out(tensor *, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim);
-int atg_norm_out1(tensor *, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype);
-int atg_normal_(tensor *, tensor self, double mean, double std);
-int atg_normal_out(tensor *, tensor out, tensor mean, double std);
-int atg_normal_out1(tensor *, tensor out, double mean, tensor std);
-int atg_normal_out2(tensor *, tensor out, tensor mean, tensor std);
-int atg_normal_out3(tensor *, tensor out, double mean, double std, int64_t *size_data, int size_len);
-int atg_nuclear_norm(tensor *, tensor self, int keepdim);
-int atg_nuclear_norm1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim);
-int atg_nuclear_norm_out(tensor *, tensor out, tensor self, int keepdim);
-int atg_nuclear_norm_out1(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim);
-int atg_numpy_t(tensor *, tensor self);
-int atg_one_hot(tensor *, tensor self, int64_t num_classes);
-int atg_ones(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device);
-int atg_ones_like(tensor *, tensor self);
-int atg_ones_like1(tensor *, tensor self, int options_kind, int options_device);
-int atg_ones_out(tensor *, tensor out, int64_t *size_data, int size_len);
-int atg_orgqr(tensor *, tensor self, tensor input2);
-int atg_orgqr_out(tensor *, tensor out, tensor self, tensor input2);
-int atg_ormqr(tensor *, tensor self, tensor input2, tensor input3, int left, int transpose);
-int atg_ormqr_out(tensor *, tensor out, tensor self, tensor input2, tensor input3, int left, int transpose);
-int atg_pairwise_distance(tensor *, tensor x1, tensor x2, double p, double eps, int keepdim);
-int atg_pdist(tensor *, tensor self, double p);
-int atg_permute(tensor *, tensor self, int64_t *dims_data, int dims_len);
-int atg_pin_memory(tensor *, tensor self);
-int atg_pinverse(tensor *, tensor self, double rcond);
-int atg_pixel_shuffle(tensor *, tensor self, int64_t upscale_factor);
-int atg_poisson(tensor *, tensor self);
-int atg_poisson_nll_loss(tensor *, tensor input, tensor target, int log_input, int full, double eps, int64_t reduction);
-int atg_polygamma(tensor *, int64_t n, tensor self);
-int atg_polygamma_(tensor *, tensor self, int64_t n);
-int atg_polygamma_out(tensor *, tensor out, int64_t n, tensor self);
-int atg_pow(tensor *, tensor self, scalar exponent);
-int atg_pow1(tensor *, tensor self, tensor exponent);
-int atg_pow2(tensor *, scalar self, tensor exponent);
-int atg_pow_(tensor *, tensor self, scalar exponent);
-int atg_pow_1(tensor *, tensor self, tensor exponent);
-int atg_pow_out(tensor *, tensor out, tensor self, scalar exponent);
-int atg_pow_out1(tensor *, tensor out, tensor self, tensor exponent);
-int atg_pow_out2(tensor *, tensor out, scalar self, tensor exponent);
-int atg_prelu(tensor *, tensor self, tensor weight);
-int atg_prelu_backward(tensor *, tensor grad_output, tensor self, tensor weight);
-int atg_prod(tensor *, tensor self, int dtype);
-int atg_prod1(tensor *, tensor self, int64_t dim, int keepdim, int dtype);
-int atg_prod_out(tensor *, tensor out, tensor self, int64_t dim, int keepdim, int dtype);
-int atg_put_(tensor *, tensor self, tensor index, tensor source, int accumulate);
-int atg_q_per_channel_scales(tensor *, tensor self);
-int atg_q_per_channel_zero_points(tensor *, tensor self);
-int atg_qr(tensor *, tensor self, int some);
-int atg_qr_out(tensor *, tensor Q, tensor R, tensor self, int some);
-int atg_quantize_per_channel(tensor *, tensor self, tensor scales, tensor zero_points, int64_t axis, int dtype);
-int atg_quantize_per_tensor(tensor *, tensor self, double scale, int64_t zero_point, int dtype);
-int atg_quantized_gru(tensor *, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first);
-int atg_quantized_gru1(tensor *, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional);
-int atg_quantized_gru_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh);
-int atg_quantized_lstm(tensor *, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first, int dtype, int use_dynamic);
-int atg_quantized_lstm1(tensor *, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int dtype, int use_dynamic);
-int atg_quantized_lstm_cell(tensor *, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh);
-int atg_quantized_max_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode);
-int atg_quantized_rnn_relu_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh);
-int atg_quantized_rnn_tanh_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh);
-int atg_rand(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device);
-int atg_rand_like(tensor *, tensor self);
-int atg_rand_like1(tensor *, tensor self, int options_kind, int options_device);
-int atg_rand_out(tensor *, tensor out, int64_t *size_data, int size_len);
-int atg_randint(tensor *, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device);
-int atg_randint1(tensor *, int64_t low, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device);
-int atg_randint_like(tensor *, tensor self, int64_t high);
-int atg_randint_like1(tensor *, tensor self, int64_t low, int64_t high);
-int atg_randint_like2(tensor *, tensor self, int64_t high, int options_kind, int options_device);
-int atg_randint_like3(tensor *, tensor self, int64_t low, int64_t high, int options_kind, int options_device);
-int atg_randint_out(tensor *, tensor out, int64_t high, int64_t *size_data, int size_len);
-int atg_randint_out1(tensor *, tensor out, int64_t low, int64_t high, int64_t *size_data, int size_len);
-int atg_randn(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device);
-int atg_randn_like(tensor *, tensor self);
-int atg_randn_like1(tensor *, tensor self, int options_kind, int options_device);
-int atg_randn_out(tensor *, tensor out, int64_t *size_data, int size_len);
-int atg_random_(tensor *, tensor self);
-int atg_random_1(tensor *, tensor self, int64_t to);
-int atg_random_2(tensor *, tensor self, int64_t from, int64_t to);
-int atg_randperm(tensor *, int64_t n, int options_kind, int options_device);
-int atg_randperm_out(tensor *, tensor out, int64_t n);
-int atg_range(tensor *, scalar start, scalar end, int options_kind, int options_device);
-int atg_range1(tensor *, scalar start, scalar end, int options_kind, int options_device);
-int atg_range_out(tensor *, tensor out, scalar start, scalar end);
-int atg_real(tensor *, tensor self);
-int atg_real_out(tensor *, tensor out, tensor self);
-int atg_reciprocal(tensor *, tensor self);
-int atg_reciprocal_(tensor *, tensor self);
-int atg_reciprocal_out(tensor *, tensor out, tensor self);
-int atg_reflection_pad1d(tensor *, tensor self, int64_t *padding_data, int padding_len);
-int atg_reflection_pad1d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
-int atg_reflection_pad1d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
-int atg_reflection_pad1d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len);
-int atg_reflection_pad2d(tensor *, tensor self, int64_t *padding_data, int padding_len);
-int atg_reflection_pad2d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
-int atg_reflection_pad2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
-int atg_reflection_pad2d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len);
-int atg_relu(tensor *, tensor self);
-int atg_relu_(tensor *, tensor self);
-int atg_remainder(tensor *, tensor self, scalar other);
-int atg_remainder1(tensor *, tensor self, tensor other);
-int atg_remainder_(tensor *, tensor self, scalar other);
-int atg_remainder_1(tensor *, tensor self, tensor other);
-int atg_remainder_out(tensor *, tensor out, tensor self, scalar other);
-int atg_remainder_out1(tensor *, tensor out, tensor self, tensor other);
-int atg_renorm(tensor *, tensor self, scalar p, int64_t dim, scalar maxnorm);
-int atg_renorm_(tensor *, tensor self, scalar p, int64_t dim, scalar maxnorm);
-int atg_renorm_out(tensor *, tensor out, tensor self, scalar p, int64_t dim, scalar maxnorm);
-int atg_repeat(tensor *, tensor self, int64_t *repeats_data, int repeats_len);
-int atg_repeat_interleave(tensor *, tensor repeats);
-int atg_repeat_interleave1(tensor *, tensor self, tensor repeats, int64_t dim);
-int atg_repeat_interleave2(tensor *, tensor self, int64_t repeats, int64_t dim);
-int atg_replication_pad1d(tensor *, tensor self, int64_t *padding_data, int padding_len);
-int atg_replication_pad1d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
-int atg_replication_pad1d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
-int atg_replication_pad1d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len);
-int atg_replication_pad2d(tensor *, tensor self, int64_t *padding_data, int padding_len);
-int atg_replication_pad2d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
-int atg_replication_pad2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
-int atg_replication_pad2d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len);
-int atg_replication_pad3d(tensor *, tensor self, int64_t *padding_data, int padding_len);
-int atg_replication_pad3d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
-int atg_replication_pad3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
-int atg_replication_pad3d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len);
-int atg_requires_grad_(tensor *, tensor self, int _requires_grad);
-int atg_reshape(tensor *, tensor self, int64_t *shape_data, int shape_len);
-int atg_reshape_as(tensor *, tensor self, tensor other);
-int atg_resize_(tensor *, tensor self, int64_t *size_data, int size_len);
-int atg_resize_as_(tensor *, tensor self, tensor the_template);
-int atg_rfft(tensor *, tensor self, int64_t signal_ndim, int normalized, int onesided);
-int atg_rnn_relu(tensor *, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first);
-int atg_rnn_relu1(tensor *, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional);
-int atg_rnn_relu_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh);
-int atg_rnn_tanh(tensor *, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first);
-int atg_rnn_tanh1(tensor *, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional);
-int atg_rnn_tanh_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh);
-int atg_roll(tensor *, tensor self, int64_t *shifts_data, int shifts_len, int64_t *dims_data, int dims_len);
-int atg_rot90(tensor *, tensor self, int64_t k, int64_t *dims_data, int dims_len);
-int atg_round(tensor *, tensor self);
-int atg_round_(tensor *, tensor self);
-int atg_round_out(tensor *, tensor out, tensor self);
-int atg_rrelu(tensor *, tensor self, int training);
-int atg_rrelu_(tensor *, tensor self, int training);
-int atg_rrelu_with_noise(tensor *, tensor self, tensor noise, int training);
-int atg_rrelu_with_noise_(tensor *, tensor self, tensor noise, int training);
-int atg_rrelu_with_noise_backward(tensor *, tensor grad_output, tensor self, tensor noise, scalar lower, scalar upper, int training);
-int atg_rrelu_with_noise_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor noise, scalar lower, scalar upper, int training);
-int atg_rrelu_with_noise_out(tensor *, tensor out, tensor self, tensor noise, int training);
-int atg_rsqrt(tensor *, tensor self);
-int atg_rsqrt_(tensor *, tensor self);
-int atg_rsqrt_out(tensor *, tensor out, tensor self);
-int atg_rsub(tensor *, tensor self, tensor other);
-int atg_rsub1(tensor *, tensor self, scalar other);
-int atg_scalar_tensor(tensor *, scalar s, int options_kind, int options_device);
-int atg_scatter(tensor *, tensor self, int64_t dim, tensor index, tensor src);
-int atg_scatter1(tensor *, tensor self, int64_t dim, tensor index, scalar value);
-int atg_scatter_(tensor *, tensor self, int64_t dim, tensor index, tensor src);
-int atg_scatter_1(tensor *, tensor self, int64_t dim, tensor index, scalar value);
-int atg_scatter_add(tensor *, tensor self, int64_t dim, tensor index, tensor src);
-int atg_scatter_add_(tensor *, tensor self, int64_t dim, tensor index, tensor src);
-int atg_select(tensor *, tensor self, int64_t dim, int64_t index);
-int atg_selu(tensor *, tensor self);
-int atg_selu_(tensor *, tensor self);
-int atg_set_(tensor *, tensor self);
-int atg_set_1(tensor *, tensor self, tensor source);
-int atg_set_requires_grad(tensor *, tensor self, int r);
-int atg_sigmoid(tensor *, tensor self);
-int atg_sigmoid_(tensor *, tensor self);
-int atg_sigmoid_backward(tensor *, tensor grad_output, tensor output);
-int atg_sigmoid_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor output);
-int atg_sigmoid_out(tensor *, tensor out, tensor self);
-int atg_sign(tensor *, tensor self);
-int atg_sign_(tensor *, tensor self);
-int atg_sign_out(tensor *, tensor out, tensor self);
-int atg_sin(tensor *, tensor self);
-int atg_sin_(tensor *, tensor self);
-int atg_sin_out(tensor *, tensor out, tensor self);
-int atg_sinh(tensor *, tensor self);
-int atg_sinh_(tensor *, tensor self);
-int atg_sinh_out(tensor *, tensor out, tensor self);
-int atg_slice(tensor *, tensor self, int64_t dim, int64_t start, int64_t end, int64_t step);
-int atg_slogdet(tensor *, tensor self);
-int atg_slow_conv3d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len);
-int atg_slow_conv3d_out(tensor *, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len);
-int atg_slow_conv_dilated2d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len);
-int atg_slow_conv_dilated3d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len);
-int atg_slow_conv_transpose2d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len);
-int atg_slow_conv_transpose2d_out(tensor *, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len);
-int atg_slow_conv_transpose3d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len);
-int atg_slow_conv_transpose3d_out(tensor *, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len);
-int atg_smm(tensor *, tensor self, tensor mat2);
-int atg_smooth_l1_loss(tensor *, tensor self, tensor target, int64_t reduction);
-int atg_smooth_l1_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction);
-int atg_smooth_l1_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction);
-int atg_smooth_l1_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction);
-int atg_soft_margin_loss(tensor *, tensor self, tensor target, int64_t reduction);
-int atg_soft_margin_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction);
-int atg_soft_margin_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction);
-int atg_soft_margin_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction);
-int atg_softmax(tensor *, tensor self, int64_t dim, int dtype);
-int atg_softplus(tensor *, tensor self);
-int atg_softplus_backward(tensor *, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output);
-int atg_softplus_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output);
-int atg_softplus_out(tensor *, tensor out, tensor self);
-int atg_softshrink(tensor *, tensor self);
-int atg_softshrink_backward(tensor *, tensor grad_output, tensor self, scalar lambd);
-int atg_softshrink_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar lambd);
-int atg_softshrink_out(tensor *, tensor out, tensor self);
-int atg_solve(tensor *, tensor self, tensor A);
-int atg_solve_out(tensor *, tensor solution, tensor lu, tensor self, tensor A);
-int atg_sort(tensor *, tensor self, int64_t dim, int descending);
-int atg_sort_out(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int descending);
-int atg_sparse_coo_tensor(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device);
-int atg_sparse_coo_tensor1(tensor *, tensor indices, tensor values, int options_kind, int options_device);
-int atg_sparse_coo_tensor2(tensor *, tensor indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device);
-int atg_sparse_mask(tensor *, tensor self, tensor mask);
-int atg_sparse_resize_(tensor *, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim);
-int atg_sparse_resize_and_clear_(tensor *, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim);
+C_API int atg_nonzero_numpy(tensor *, tensor self);
+C_API int atg_nonzero_out(tensor *, tensor out, tensor self);
+C_API int atg_norm(tensor *, tensor self);
+C_API int atg_norm1(tensor *, tensor self, scalar p, int dtype);
+C_API int atg_norm2(tensor *, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim);
+C_API int atg_norm3(tensor *, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype);
+C_API int atg_norm_except_dim(tensor *, tensor v, int64_t pow, int64_t dim);
+C_API int atg_norm_out(tensor *, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim);
+C_API int atg_norm_out1(tensor *, tensor out, tensor self, scalar p, int64_t *dim_data, int dim_len, int keepdim, int dtype);
+C_API int atg_normal_(tensor *, tensor self, double mean, double std);
+C_API int atg_normal_out(tensor *, tensor out, tensor mean, double std);
+C_API int atg_normal_out1(tensor *, tensor out, double mean, tensor std);
+C_API int atg_normal_out2(tensor *, tensor out, tensor mean, tensor std);
+C_API int atg_normal_out3(tensor *, tensor out, double mean, double std, int64_t *size_data, int size_len);
+C_API int atg_nuclear_norm(tensor *, tensor self, int keepdim);
+C_API int atg_nuclear_norm1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim);
+C_API int atg_nuclear_norm_out(tensor *, tensor out, tensor self, int keepdim);
+C_API int atg_nuclear_norm_out1(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim);
+C_API int atg_numpy_t(tensor *, tensor self);
+C_API int atg_one_hot(tensor *, tensor self, int64_t num_classes);
+C_API int atg_ones(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device);
+C_API int atg_ones_like(tensor *, tensor self);
+C_API int atg_ones_like1(tensor *, tensor self, int options_kind, int options_device);
+C_API int atg_ones_out(tensor *, tensor out, int64_t *size_data, int size_len);
+C_API int atg_orgqr(tensor *, tensor self, tensor input2);
+C_API int atg_orgqr_out(tensor *, tensor out, tensor self, tensor input2);
+C_API int atg_ormqr(tensor *, tensor self, tensor input2, tensor input3, int left, int transpose);
+C_API int atg_ormqr_out(tensor *, tensor out, tensor self, tensor input2, tensor input3, int left, int transpose);
+C_API int atg_pairwise_distance(tensor *, tensor x1, tensor x2, double p, double eps, int keepdim);
+C_API int atg_pdist(tensor *, tensor self, double p);
+C_API int atg_permute(tensor *, tensor self, int64_t *dims_data, int dims_len);
+C_API int atg_pin_memory(tensor *, tensor self);
+C_API int atg_pinverse(tensor *, tensor self, double rcond);
+C_API int atg_pixel_shuffle(tensor *, tensor self, int64_t upscale_factor);
+C_API int atg_poisson(tensor *, tensor self);
+C_API int atg_poisson_nll_loss(tensor *, tensor input, tensor target, int log_input, int full, double eps, int64_t reduction);
+C_API int atg_polygamma(tensor *, int64_t n, tensor self);
+C_API int atg_polygamma_(tensor *, tensor self, int64_t n);
+C_API int atg_polygamma_out(tensor *, tensor out, int64_t n, tensor self);
+C_API int atg_pow(tensor *, tensor self, scalar exponent);
+C_API int atg_pow1(tensor *, tensor self, tensor exponent);
+C_API int atg_pow2(tensor *, scalar self, tensor exponent);
+C_API int atg_pow_(tensor *, tensor self, scalar exponent);
+C_API int atg_pow_1(tensor *, tensor self, tensor exponent);
+C_API int atg_pow_out(tensor *, tensor out, tensor self, scalar exponent);
+C_API int atg_pow_out1(tensor *, tensor out, tensor self, tensor exponent);
+C_API int atg_pow_out2(tensor *, tensor out, scalar self, tensor exponent);
+C_API int atg_prelu(tensor *, tensor self, tensor weight);
+C_API int atg_prelu_backward(tensor *, tensor grad_output, tensor self, tensor weight);
+C_API int atg_prod(tensor *, tensor self, int dtype);
+C_API int atg_prod1(tensor *, tensor self, int64_t dim, int keepdim, int dtype);
+C_API int atg_prod_out(tensor *, tensor out, tensor self, int64_t dim, int keepdim, int dtype);
+C_API int atg_put_(tensor *, tensor self, tensor index, tensor source, int accumulate);
+C_API int atg_q_per_channel_scales(tensor *, tensor self);
+C_API int atg_q_per_channel_zero_points(tensor *, tensor self);
+C_API int atg_qr(tensor *, tensor self, int some);
+C_API int atg_qr_out(tensor *, tensor Q, tensor R, tensor self, int some);
+C_API int atg_quantize_per_channel(tensor *, tensor self, tensor scales, tensor zero_points, int64_t axis, int dtype);
+C_API int atg_quantize_per_tensor(tensor *, tensor self, double scale, int64_t zero_point, int dtype);
+C_API int atg_quantized_gru(tensor *, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first);
+C_API int atg_quantized_gru1(tensor *, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional);
+C_API int atg_quantized_gru_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh);
+C_API int atg_quantized_lstm(tensor *, tensor input, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first, int dtype, int use_dynamic);
+C_API int atg_quantized_lstm1(tensor *, tensor data, tensor batch_sizes, tensor *hx_data, int hx_len, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int dtype, int use_dynamic);
+C_API int atg_quantized_lstm_cell(tensor *, tensor input, tensor *hx_data, int hx_len, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh);
+C_API int atg_quantized_max_pool2d(tensor *, tensor self, int64_t *kernel_size_data, int kernel_size_len, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len, int ceil_mode);
+C_API int atg_quantized_rnn_relu_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh);
+C_API int atg_quantized_rnn_tanh_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh, tensor packed_ih, tensor packed_hh, tensor col_offsets_ih, tensor col_offsets_hh, scalar scale_ih, scalar scale_hh, scalar zero_point_ih, scalar zero_point_hh);
+C_API int atg_rand(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device);
+C_API int atg_rand_like(tensor *, tensor self);
+C_API int atg_rand_like1(tensor *, tensor self, int options_kind, int options_device);
+C_API int atg_rand_out(tensor *, tensor out, int64_t *size_data, int size_len);
+C_API int atg_randint(tensor *, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device);
+C_API int atg_randint1(tensor *, int64_t low, int64_t high, int64_t *size_data, int size_len, int options_kind, int options_device);
+C_API int atg_randint_like(tensor *, tensor self, int64_t high);
+C_API int atg_randint_like1(tensor *, tensor self, int64_t low, int64_t high);
+C_API int atg_randint_like2(tensor *, tensor self, int64_t high, int options_kind, int options_device);
+C_API int atg_randint_like3(tensor *, tensor self, int64_t low, int64_t high, int options_kind, int options_device);
+C_API int atg_randint_out(tensor *, tensor out, int64_t high, int64_t *size_data, int size_len);
+C_API int atg_randint_out1(tensor *, tensor out, int64_t low, int64_t high, int64_t *size_data, int size_len);
+C_API int atg_randn(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device);
+C_API int atg_randn_like(tensor *, tensor self);
+C_API int atg_randn_like1(tensor *, tensor self, int options_kind, int options_device);
+C_API int atg_randn_out(tensor *, tensor out, int64_t *size_data, int size_len);
+C_API int atg_random_(tensor *, tensor self);
+C_API int atg_random_1(tensor *, tensor self, int64_t to);
+C_API int atg_random_2(tensor *, tensor self, int64_t from, int64_t to);
+C_API int atg_randperm(tensor *, int64_t n, int options_kind, int options_device);
+C_API int atg_randperm_out(tensor *, tensor out, int64_t n);
+C_API int atg_range(tensor *, scalar start, scalar end, int options_kind, int options_device);
+C_API int atg_range1(tensor *, scalar start, scalar end, int options_kind, int options_device);
+C_API int atg_range_out(tensor *, tensor out, scalar start, scalar end);
+C_API int atg_real(tensor *, tensor self);
+C_API int atg_real_out(tensor *, tensor out, tensor self);
+C_API int atg_reciprocal(tensor *, tensor self);
+C_API int atg_reciprocal_(tensor *, tensor self);
+C_API int atg_reciprocal_out(tensor *, tensor out, tensor self);
+C_API int atg_reflection_pad1d(tensor *, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_reflection_pad1d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_reflection_pad1d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_reflection_pad1d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_reflection_pad2d(tensor *, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_reflection_pad2d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_reflection_pad2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_reflection_pad2d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_relu(tensor *, tensor self);
+C_API int atg_relu_(tensor *, tensor self);
+C_API int atg_remainder(tensor *, tensor self, scalar other);
+C_API int atg_remainder1(tensor *, tensor self, tensor other);
+C_API int atg_remainder_(tensor *, tensor self, scalar other);
+C_API int atg_remainder_1(tensor *, tensor self, tensor other);
+C_API int atg_remainder_out(tensor *, tensor out, tensor self, scalar other);
+C_API int atg_remainder_out1(tensor *, tensor out, tensor self, tensor other);
+C_API int atg_renorm(tensor *, tensor self, scalar p, int64_t dim, scalar maxnorm);
+C_API int atg_renorm_(tensor *, tensor self, scalar p, int64_t dim, scalar maxnorm);
+C_API int atg_renorm_out(tensor *, tensor out, tensor self, scalar p, int64_t dim, scalar maxnorm);
+C_API int atg_repeat(tensor *, tensor self, int64_t *repeats_data, int repeats_len);
+C_API int atg_repeat_interleave(tensor *, tensor repeats);
+C_API int atg_repeat_interleave1(tensor *, tensor self, tensor repeats, int64_t dim);
+C_API int atg_repeat_interleave2(tensor *, tensor self, int64_t repeats, int64_t dim);
+C_API int atg_replication_pad1d(tensor *, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_replication_pad1d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_replication_pad1d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_replication_pad1d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_replication_pad2d(tensor *, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_replication_pad2d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_replication_pad2d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_replication_pad2d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_replication_pad3d(tensor *, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_replication_pad3d_backward(tensor *, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_replication_pad3d_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_replication_pad3d_out(tensor *, tensor out, tensor self, int64_t *padding_data, int padding_len);
+C_API int atg_requires_grad_(tensor *, tensor self, int _requires_grad);
+C_API int atg_reshape(tensor *, tensor self, int64_t *shape_data, int shape_len);
+C_API int atg_reshape_as(tensor *, tensor self, tensor other);
+C_API int atg_resize_(tensor *, tensor self, int64_t *size_data, int size_len);
+C_API int atg_resize_as_(tensor *, tensor self, tensor the_template);
+C_API int atg_rfft(tensor *, tensor self, int64_t signal_ndim, int normalized, int onesided);
+C_API int atg_rnn_relu(tensor *, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first);
+C_API int atg_rnn_relu1(tensor *, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional);
+C_API int atg_rnn_relu_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh);
+C_API int atg_rnn_tanh(tensor *, tensor input, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional, int batch_first);
+C_API int atg_rnn_tanh1(tensor *, tensor data, tensor batch_sizes, tensor hx, tensor *params_data, int params_len, int has_biases, int64_t num_layers, double dropout, int train, int bidirectional);
+C_API int atg_rnn_tanh_cell(tensor *, tensor input, tensor hx, tensor w_ih, tensor w_hh, tensor b_ih, tensor b_hh);
+C_API int atg_roll(tensor *, tensor self, int64_t *shifts_data, int shifts_len, int64_t *dims_data, int dims_len);
+C_API int atg_rot90(tensor *, tensor self, int64_t k, int64_t *dims_data, int dims_len);
+C_API int atg_round(tensor *, tensor self);
+C_API int atg_round_(tensor *, tensor self);
+C_API int atg_round_out(tensor *, tensor out, tensor self);
+C_API int atg_rrelu(tensor *, tensor self, int training);
+C_API int atg_rrelu_(tensor *, tensor self, int training);
+C_API int atg_rrelu_with_noise(tensor *, tensor self, tensor noise, int training);
+C_API int atg_rrelu_with_noise_(tensor *, tensor self, tensor noise, int training);
+C_API int atg_rrelu_with_noise_backward(tensor *, tensor grad_output, tensor self, tensor noise, scalar lower, scalar upper, int training);
+C_API int atg_rrelu_with_noise_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor noise, scalar lower, scalar upper, int training);
+C_API int atg_rrelu_with_noise_out(tensor *, tensor out, tensor self, tensor noise, int training);
+C_API int atg_rsqrt(tensor *, tensor self);
+C_API int atg_rsqrt_(tensor *, tensor self);
+C_API int atg_rsqrt_out(tensor *, tensor out, tensor self);
+C_API int atg_rsub(tensor *, tensor self, tensor other);
+C_API int atg_rsub1(tensor *, tensor self, scalar other);
+C_API int atg_scalar_tensor(tensor *, scalar s, int options_kind, int options_device);
+C_API int atg_scatter(tensor *, tensor self, int64_t dim, tensor index, tensor src);
+C_API int atg_scatter1(tensor *, tensor self, int64_t dim, tensor index, scalar value);
+C_API int atg_scatter_(tensor *, tensor self, int64_t dim, tensor index, tensor src);
+C_API int atg_scatter_1(tensor *, tensor self, int64_t dim, tensor index, scalar value);
+C_API int atg_scatter_add(tensor *, tensor self, int64_t dim, tensor index, tensor src);
+C_API int atg_scatter_add_(tensor *, tensor self, int64_t dim, tensor index, tensor src);
+C_API int atg_select(tensor *, tensor self, int64_t dim, int64_t index);
+C_API int atg_selu(tensor *, tensor self);
+C_API int atg_selu_(tensor *, tensor self);
+C_API int atg_set_(tensor *, tensor self);
+C_API int atg_set_1(tensor *, tensor self, tensor source);
+C_API int atg_set_requires_grad(tensor *, tensor self, int r);
+C_API int atg_sigmoid(tensor *, tensor self);
+C_API int atg_sigmoid_(tensor *, tensor self);
+C_API int atg_sigmoid_backward(tensor *, tensor grad_output, tensor output);
+C_API int atg_sigmoid_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor output);
+C_API int atg_sigmoid_out(tensor *, tensor out, tensor self);
+C_API int atg_sign(tensor *, tensor self);
+C_API int atg_sign_(tensor *, tensor self);
+C_API int atg_sign_out(tensor *, tensor out, tensor self);
+C_API int atg_sin(tensor *, tensor self);
+C_API int atg_sin_(tensor *, tensor self);
+C_API int atg_sin_out(tensor *, tensor out, tensor self);
+C_API int atg_sinh(tensor *, tensor self);
+C_API int atg_sinh_(tensor *, tensor self);
+C_API int atg_sinh_out(tensor *, tensor out, tensor self);
+C_API int atg_slice(tensor *, tensor self, int64_t dim, int64_t start, int64_t end, int64_t step);
+C_API int atg_slogdet(tensor *, tensor self);
+C_API int atg_slow_conv3d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len);
+C_API int atg_slow_conv3d_out(tensor *, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len);
+C_API int atg_slow_conv_dilated2d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len);
+C_API int atg_slow_conv_dilated3d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *dilation_data, int dilation_len);
+C_API int atg_slow_conv_transpose2d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len);
+C_API int atg_slow_conv_transpose2d_out(tensor *, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len);
+C_API int atg_slow_conv_transpose3d(tensor *, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len);
+C_API int atg_slow_conv_transpose3d_out(tensor *, tensor out, tensor self, tensor weight, int64_t *kernel_size_data, int kernel_size_len, tensor bias, int64_t *stride_data, int stride_len, int64_t *padding_data, int padding_len, int64_t *output_padding_data, int output_padding_len, int64_t *dilation_data, int dilation_len);
+C_API int atg_smm(tensor *, tensor self, tensor mat2);
+C_API int atg_smooth_l1_loss(tensor *, tensor self, tensor target, int64_t reduction);
+C_API int atg_smooth_l1_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction);
+C_API int atg_smooth_l1_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction);
+C_API int atg_smooth_l1_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction);
+C_API int atg_soft_margin_loss(tensor *, tensor self, tensor target, int64_t reduction);
+C_API int atg_soft_margin_loss_backward(tensor *, tensor grad_output, tensor self, tensor target, int64_t reduction);
+C_API int atg_soft_margin_loss_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, tensor target, int64_t reduction);
+C_API int atg_soft_margin_loss_out(tensor *, tensor out, tensor self, tensor target, int64_t reduction);
+C_API int atg_softmax(tensor *, tensor self, int64_t dim, int dtype);
+C_API int atg_softplus(tensor *, tensor self);
+C_API int atg_softplus_backward(tensor *, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output);
+C_API int atg_softplus_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar beta, scalar threshold, tensor output);
+C_API int atg_softplus_out(tensor *, tensor out, tensor self);
+C_API int atg_softshrink(tensor *, tensor self);
+C_API int atg_softshrink_backward(tensor *, tensor grad_output, tensor self, scalar lambd);
+C_API int atg_softshrink_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor self, scalar lambd);
+C_API int atg_softshrink_out(tensor *, tensor out, tensor self);
+C_API int atg_solve(tensor *, tensor self, tensor A);
+C_API int atg_solve_out(tensor *, tensor solution, tensor lu, tensor self, tensor A);
+C_API int atg_sort(tensor *, tensor self, int64_t dim, int descending);
+C_API int atg_sort_out(tensor *, tensor values, tensor indices, tensor self, int64_t dim, int descending);
+C_API int atg_sparse_coo_tensor(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device);
+C_API int atg_sparse_coo_tensor1(tensor *, tensor indices, tensor values, int options_kind, int options_device);
+C_API int atg_sparse_coo_tensor2(tensor *, tensor indices, tensor values, int64_t *size_data, int size_len, int options_kind, int options_device);
+C_API int atg_sparse_mask(tensor *, tensor self, tensor mask);
+C_API int atg_sparse_resize_(tensor *, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim);
+C_API int atg_sparse_resize_and_clear_(tensor *, tensor self, int64_t *size_data, int size_len, int64_t sparse_dim, int64_t dense_dim);
// tensor *atg_split(tensor self, int64_t split_size, int64_t dim);
-int atg_split(tensor *, tensor self, int64_t split_size, int64_t dim);
+C_API int atg_split(tensor *, tensor self, int64_t split_size, int64_t dim);
// tensor *atg_split_with_sizes(tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim);
-int atg_split_with_sizes(tensor *, tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim);
-int atg_sqrt(tensor *, tensor self);
-int atg_sqrt_(tensor *, tensor self);
-int atg_sqrt_out(tensor *, tensor out, tensor self);
-int atg_squeeze(tensor *, tensor self);
-int atg_squeeze1(tensor *, tensor self, int64_t dim);
-int atg_squeeze_(tensor *, tensor self);
-int atg_squeeze_1(tensor *, tensor self, int64_t dim);
-int atg_sspaddmm(tensor *, tensor self, tensor mat1, tensor mat2);
-int atg_sspaddmm_out(tensor *, tensor out, tensor self, tensor mat1, tensor mat2);
-int atg_stack(tensor *, tensor *tensors_data, int tensors_len, int64_t dim);
-int atg_stack_out(tensor *, tensor out, tensor *tensors_data, int tensors_len, int64_t dim);
-int atg_std(tensor *, tensor self, int unbiased);
-int atg_std1(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
-int atg_std_mean(tensor *, tensor self, int unbiased);
-int atg_std_mean1(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
-int atg_std_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
-int atg_stft(tensor *, tensor self, int64_t n_fft, int64_t hop_length, int64_t win_length, tensor window, int normalized, int onesided);
-int atg_sub(tensor *, tensor self, tensor other);
-int atg_sub1(tensor *, tensor self, scalar other);
-int atg_sub_(tensor *, tensor self, tensor other);
-int atg_sub_1(tensor *, tensor self, scalar other);
-int atg_sub_out(tensor *, tensor out, tensor self, tensor other);
-int atg_sum(tensor *, tensor self, int dtype);
-int atg_sum1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype);
-int atg_sum_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype);
-int atg_sum_to_size(tensor *, tensor self, int64_t *size_data, int size_len);
-int atg_svd(tensor *, tensor self, int some, int compute_uv);
-int atg_svd_out(tensor *, tensor U, tensor S, tensor V, tensor self, int some, int compute_uv);
-int atg_symeig(tensor *, tensor self, int eigenvectors, int upper);
-int atg_symeig_out(tensor *, tensor e, tensor V, tensor self, int eigenvectors, int upper);
-int atg_t(tensor *, tensor self);
-int atg_t_(tensor *, tensor self);
-int atg_take(tensor *, tensor self, tensor index);
-int atg_take_out(tensor *, tensor out, tensor self, tensor index);
-int atg_tan(tensor *, tensor self);
-int atg_tan_(tensor *, tensor self);
-int atg_tan_out(tensor *, tensor out, tensor self);
-int atg_tanh(tensor *, tensor self);
-int atg_tanh_(tensor *, tensor self);
-int atg_tanh_backward(tensor *, tensor grad_output, tensor output);
-int atg_tanh_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor output);
-int atg_tanh_out(tensor *, tensor out, tensor self);
-int atg_tensordot(tensor *, tensor self, tensor other, int64_t *dims_self_data, int dims_self_len, int64_t *dims_other_data, int dims_other_len);
-int atg_threshold(tensor *, tensor self, scalar threshold, scalar value);
-int atg_threshold_(tensor *, tensor self, scalar threshold, scalar value);
-int atg_threshold_backward(tensor *, tensor grad_output, tensor self, scalar threshold);
-int atg_threshold_out(tensor *, tensor out, tensor self, scalar threshold, scalar value);
-int atg_to(tensor *, tensor self, int device);
-int atg_to1(tensor *, tensor self, int options_kind, int options_device, int non_blocking, int copy);
-int atg_to2(tensor *, tensor self, int dtype, int non_blocking, int copy);
-int atg_to3(tensor *, tensor self, tensor other, int non_blocking, int copy);
-int atg_to4(tensor *, tensor self, int device, int dtype, int non_blocking, int copy);
-int atg_to_dense(tensor *, tensor self);
-int atg_to_dense_backward(tensor *, tensor grad, tensor input);
-int atg_to_mkldnn(tensor *, tensor self);
-int atg_to_mkldnn_backward(tensor *, tensor grad, tensor input);
-int atg_to_sparse(tensor *, tensor self);
-int atg_to_sparse1(tensor *, tensor self, int64_t sparse_dim);
-int atg_topk(tensor *, tensor self, int64_t k, int64_t dim, int largest, int sorted);
-int atg_topk_out(tensor *, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int largest, int sorted);
-int atg_totype(tensor *, tensor self, int scalar_type);
-int atg_trace(tensor *, tensor self);
-int atg_transpose(tensor *, tensor self, int64_t dim0, int64_t dim1);
-int atg_transpose_(tensor *, tensor self, int64_t dim0, int64_t dim1);
-int atg_trapz(tensor *, tensor y, tensor x, int64_t dim);
-int atg_trapz1(tensor *, tensor y, double dx, int64_t dim);
-int atg_triangular_solve(tensor *, tensor self, tensor A, int upper, int transpose, int unitriangular);
-int atg_triangular_solve_out(tensor *, tensor X, tensor M, tensor self, tensor A, int upper, int transpose, int unitriangular);
-int atg_tril(tensor *, tensor self, int64_t diagonal);
-int atg_tril_(tensor *, tensor self, int64_t diagonal);
-int atg_tril_indices(tensor *, int64_t row, int64_t col, int64_t offset, int options_kind, int options_device);
-int atg_tril_out(tensor *, tensor out, tensor self, int64_t diagonal);
-int atg_triplet_margin_loss(tensor *, tensor anchor, tensor positive, tensor negative, double margin, double p, double eps, int swap, int64_t reduction);
-int atg_triu(tensor *, tensor self, int64_t diagonal);
-int atg_triu_(tensor *, tensor self, int64_t diagonal);
-int atg_triu_indices(tensor *, int64_t row, int64_t col, int64_t offset, int options_kind, int options_device);
-int atg_triu_out(tensor *, tensor out, tensor self, int64_t diagonal);
-int atg_trunc(tensor *, tensor self);
-int atg_trunc_(tensor *, tensor self);
-int atg_trunc_out(tensor *, tensor out, tensor self);
-int atg_type_as(tensor *, tensor self, tensor other);
+C_API int atg_split_with_sizes(tensor *, tensor self, int64_t *split_sizes_data, int split_sizes_len, int64_t dim);
+C_API int atg_sqrt(tensor *, tensor self);
+C_API int atg_sqrt_(tensor *, tensor self);
+C_API int atg_sqrt_out(tensor *, tensor out, tensor self);
+C_API int atg_squeeze(tensor *, tensor self);
+C_API int atg_squeeze1(tensor *, tensor self, int64_t dim);
+C_API int atg_squeeze_(tensor *, tensor self);
+C_API int atg_squeeze_1(tensor *, tensor self, int64_t dim);
+C_API int atg_sspaddmm(tensor *, tensor self, tensor mat1, tensor mat2);
+C_API int atg_sspaddmm_out(tensor *, tensor out, tensor self, tensor mat1, tensor mat2);
+C_API int atg_stack(tensor *, tensor *tensors_data, int tensors_len, int64_t dim);
+C_API int atg_stack_out(tensor *, tensor out, tensor *tensors_data, int tensors_len, int64_t dim);
+C_API int atg_std(tensor *, tensor self, int unbiased);
+C_API int atg_std1(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
+C_API int atg_std_mean(tensor *, tensor self, int unbiased);
+C_API int atg_std_mean1(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
+C_API int atg_std_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim);
+C_API int atg_stft(tensor *, tensor self, int64_t n_fft, int64_t hop_length, int64_t win_length, tensor window, int normalized, int onesided);
+C_API int atg_sub(tensor *, tensor self, tensor other);
+C_API int atg_sub1(tensor *, tensor self, scalar other);
+C_API int atg_sub_(tensor *, tensor self, tensor other);
+C_API int atg_sub_1(tensor *, tensor self, scalar other);
+C_API int atg_sub_out(tensor *, tensor out, tensor self, tensor other);
+C_API int atg_sum(tensor *, tensor self, int dtype);
+C_API int atg_sum1(tensor *, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype);
+C_API int atg_sum_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int keepdim, int dtype);
+C_API int atg_sum_to_size(tensor *, tensor self, int64_t *size_data, int size_len);
+C_API int atg_svd(tensor *, tensor self, int some, int compute_uv);
+C_API int atg_svd_out(tensor *, tensor U, tensor S, tensor V, tensor self, int some, int compute_uv);
+C_API int atg_symeig(tensor *, tensor self, int eigenvectors, int upper);
+C_API int atg_symeig_out(tensor *, tensor e, tensor V, tensor self, int eigenvectors, int upper);
+C_API int atg_t(tensor *, tensor self);
+C_API int atg_t_(tensor *, tensor self);
+C_API int atg_take(tensor *, tensor self, tensor index);
+C_API int atg_take_out(tensor *, tensor out, tensor self, tensor index);
+C_API int atg_tan(tensor *, tensor self);
+C_API int atg_tan_(tensor *, tensor self);
+C_API int atg_tan_out(tensor *, tensor out, tensor self);
+C_API int atg_tanh(tensor *, tensor self);
+C_API int atg_tanh_(tensor *, tensor self);
+C_API int atg_tanh_backward(tensor *, tensor grad_output, tensor output);
+C_API int atg_tanh_backward_out(tensor *, tensor grad_input, tensor grad_output, tensor output);
+C_API int atg_tanh_out(tensor *, tensor out, tensor self);
+C_API int atg_tensordot(tensor *, tensor self, tensor other, int64_t *dims_self_data, int dims_self_len, int64_t *dims_other_data, int dims_other_len);
+C_API int atg_threshold(tensor *, tensor self, scalar threshold, scalar value);
+C_API int atg_threshold_(tensor *, tensor self, scalar threshold, scalar value);
+C_API int atg_threshold_backward(tensor *, tensor grad_output, tensor self, scalar threshold);
+C_API int atg_threshold_out(tensor *, tensor out, tensor self, scalar threshold, scalar value);
+C_API int atg_to(tensor *, tensor self, int device);
+C_API int atg_to1(tensor *, tensor self, int options_kind, int options_device, int non_blocking, int copy);
+C_API int atg_to2(tensor *, tensor self, int dtype, int non_blocking, int copy);
+C_API int atg_to3(tensor *, tensor self, tensor other, int non_blocking, int copy);
+C_API int atg_to4(tensor *, tensor self, int device, int dtype, int non_blocking, int copy);
+C_API int atg_to_dense(tensor *, tensor self);
+C_API int atg_to_dense_backward(tensor *, tensor grad, tensor input);
+C_API int atg_to_mkldnn(tensor *, tensor self);
+C_API int atg_to_mkldnn_backward(tensor *, tensor grad, tensor input);
+C_API int atg_to_sparse(tensor *, tensor self);
+C_API int atg_to_sparse1(tensor *, tensor self, int64_t sparse_dim);
+C_API int atg_topk(tensor *, tensor self, int64_t k, int64_t dim, int largest, int sorted);
+C_API int atg_topk_out(tensor *, tensor values, tensor indices, tensor self, int64_t k, int64_t dim, int largest, int sorted);
+C_API int atg_totype(tensor *, tensor self, int scalar_type);
+C_API int atg_trace(tensor *, tensor self);
+C_API int atg_transpose(tensor *, tensor self, int64_t dim0, int64_t dim1);
+C_API int atg_transpose_(tensor *, tensor self, int64_t dim0, int64_t dim1);
+C_API int atg_trapz(tensor *, tensor y, tensor x, int64_t dim);
+C_API int atg_trapz1(tensor *, tensor y, double dx, int64_t dim);
+C_API int atg_triangular_solve(tensor *, tensor self, tensor A, int upper, int transpose, int unitriangular);
+C_API int atg_triangular_solve_out(tensor *, tensor X, tensor M, tensor self, tensor A, int upper, int transpose, int unitriangular);
+C_API int atg_tril(tensor *, tensor self, int64_t diagonal);
+C_API int atg_tril_(tensor *, tensor self, int64_t diagonal);
+C_API int atg_tril_indices(tensor *, int64_t row, int64_t col, int64_t offset, int options_kind, int options_device);
options_kind, int options_device); +C_API int atg_tril_out(tensor *, tensor out, tensor self, int64_t diagonal); +C_API int atg_triplet_margin_loss(tensor *, tensor anchor, tensor positive, tensor negative, double margin, double p, double eps, int swap, int64_t reduction); +C_API int atg_triu(tensor *, tensor self, int64_t diagonal); +C_API int atg_triu_(tensor *, tensor self, int64_t diagonal); +C_API int atg_triu_indices(tensor *, int64_t row, int64_t col, int64_t offset, int options_kind, int options_device); +C_API int atg_triu_out(tensor *, tensor out, tensor self, int64_t diagonal); +C_API int atg_trunc(tensor *, tensor self); +C_API int atg_trunc_(tensor *, tensor self); +C_API int atg_trunc_out(tensor *, tensor out, tensor self); +C_API int atg_type_as(tensor *, tensor self, tensor other); // tensor *atg_unbind(tensor self, int64_t dim); -int atg_unbind(tensor *, tensor self, int64_t dim); -int atg_unfold(tensor *, tensor self, int64_t dimension, int64_t size, int64_t step); -int atg_uniform_(tensor *, tensor self, double from, double to); -int atg_unique_consecutive(tensor *, tensor self, int return_inverse, int return_counts, int64_t dim); -int atg_unique_dim(tensor *, tensor self, int64_t dim, int sorted, int return_inverse, int return_counts); -int atg_unique_dim_consecutive(tensor *, tensor self, int64_t dim, int return_inverse, int return_counts); -int atg_unsqueeze(tensor *, tensor self, int64_t dim); -int atg_unsqueeze_(tensor *, tensor self, int64_t dim); -int atg_upsample_bicubic2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners); -int atg_upsample_bicubic2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners); -int atg_upsample_bicubic2d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners); -int atg_upsample_bicubic2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners); -int atg_upsample_bilinear2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners); -int atg_upsample_bilinear2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners); -int atg_upsample_bilinear2d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners); -int atg_upsample_bilinear2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners); -int atg_upsample_linear1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners); -int atg_upsample_linear1d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners); -int atg_upsample_linear1d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners); -int atg_upsample_linear1d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners); -int atg_upsample_nearest1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); -int 
atg_upsample_nearest1d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len); -int atg_upsample_nearest1d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len); -int atg_upsample_nearest1d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len); -int atg_upsample_nearest2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); -int atg_upsample_nearest2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len); -int atg_upsample_nearest2d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len); -int atg_upsample_nearest2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len); -int atg_upsample_nearest3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); -int atg_upsample_nearest3d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len); -int atg_upsample_nearest3d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len); -int atg_upsample_nearest3d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len); -int atg_upsample_trilinear3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners); -int atg_upsample_trilinear3d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners); -int atg_upsample_trilinear3d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners); -int atg_upsample_trilinear3d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners); -int atg_values(tensor *, tensor self); -int atg_var(tensor *, tensor self, int unbiased); -int atg_var1(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim); -int atg_var_mean(tensor *, tensor self, int unbiased); -int atg_var_mean1(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim); -int atg_var_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim); -int atg_view(tensor *, tensor self, int64_t *size_data, int size_len); -int atg_view_as(tensor *, tensor self, tensor other); +C_API int atg_unbind(tensor *, tensor self, int64_t dim); +C_API int atg_unfold(tensor *, tensor self, int64_t dimension, int64_t size, int64_t step); +C_API int atg_uniform_(tensor *, tensor self, double from, double to); +C_API int atg_unique_consecutive(tensor *, tensor self, int return_inverse, int return_counts, int64_t dim); +C_API int atg_unique_dim(tensor *, tensor self, int64_t dim, int sorted, int return_inverse, int return_counts); +C_API int atg_unique_dim_consecutive(tensor *, tensor self, int64_t dim, int return_inverse, int return_counts); +C_API int atg_unsqueeze(tensor *, tensor self, int64_t dim); +C_API int atg_unsqueeze_(tensor *, tensor self, int64_t dim); +C_API int atg_upsample_bicubic2d(tensor *, tensor 
self, int64_t *output_size_data, int output_size_len, int align_corners); +C_API int atg_upsample_bicubic2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners); +C_API int atg_upsample_bicubic2d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners); +C_API int atg_upsample_bicubic2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners); +C_API int atg_upsample_bilinear2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners); +C_API int atg_upsample_bilinear2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners); +C_API int atg_upsample_bilinear2d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners); +C_API int atg_upsample_bilinear2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners); +C_API int atg_upsample_linear1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners); +C_API int atg_upsample_linear1d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners); +C_API int atg_upsample_linear1d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners); +C_API int atg_upsample_linear1d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners); +C_API int atg_upsample_nearest1d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_upsample_nearest1d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len); +C_API int atg_upsample_nearest1d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len); +C_API int atg_upsample_nearest1d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_upsample_nearest2d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_upsample_nearest2d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len); +C_API int atg_upsample_nearest2d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len); +C_API int atg_upsample_nearest2d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_upsample_nearest3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_upsample_nearest3d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len); +C_API int atg_upsample_nearest3d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t 
*input_size_data, int input_size_len); +C_API int atg_upsample_nearest3d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len); +C_API int atg_upsample_trilinear3d(tensor *, tensor self, int64_t *output_size_data, int output_size_len, int align_corners); +C_API int atg_upsample_trilinear3d_backward(tensor *, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners); +C_API int atg_upsample_trilinear3d_backward_out(tensor *, tensor grad_input, tensor grad_output, int64_t *output_size_data, int output_size_len, int64_t *input_size_data, int input_size_len, int align_corners); +C_API int atg_upsample_trilinear3d_out(tensor *, tensor out, tensor self, int64_t *output_size_data, int output_size_len, int align_corners); +C_API int atg_values(tensor *, tensor self); +C_API int atg_var(tensor *, tensor self, int unbiased); +C_API int atg_var1(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim); +C_API int atg_var_mean(tensor *, tensor self, int unbiased); +C_API int atg_var_mean1(tensor *, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim); +C_API int atg_var_out(tensor *, tensor out, tensor self, int64_t *dim_data, int dim_len, int unbiased, int keepdim); +C_API int atg_view(tensor *, tensor self, int64_t *size_data, int size_len); +C_API int atg_view_as(tensor *, tensor self, tensor other); // tensor *atg_where(tensor condition); -int atg_where(tensor *, tensor condition); -int atg_where1(tensor *, tensor condition, tensor self, tensor other); -int atg_zero_(tensor *, tensor self); -int atg_zeros(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device); -int atg_zeros_like(tensor *, tensor self); -int atg_zeros_like1(tensor *, tensor self, int options_kind, int options_device); -int atg_zeros_out(tensor *, tensor out, int64_t *size_data, int size_len); +C_API int atg_where(tensor *, tensor condition); +C_API int atg_where1(tensor *, tensor condition, tensor self, tensor other); +C_API int atg_zero_(tensor *, tensor self); +C_API int atg_zeros(tensor *, int64_t *size_data, int size_len, int options_kind, int options_device); +C_API int atg_zeros_like(tensor *, tensor self); +C_API int atg_zeros_like1(tensor *, tensor self, int options_kind, int options_device); +C_API int atg_zeros_out(tensor *, tensor out, int64_t *size_data, int size_len);
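
Note on the C_API decorator applied throughout the hunks above: its definition is not visible in this part of the diff. An export decorator of this kind is conventionally defined along the lines of the following sketch; this is an assumption for illustration, not this patch's actual definition, and the guard name EXPORTING_TORCH_API is hypothetical:

/* Sketch of a conventional cross-platform DLL export decorator (assumed,
   not taken from this patch). On Windows a symbol must be marked
   __declspec(dllexport) while building the DLL and __declspec(dllimport)
   while consuming it; on ELF/Mach-O platforms default visibility suffices. */
#ifdef _WIN32
#  ifdef EXPORTING_TORCH_API            /* hypothetical build-time flag */
#    define C_API __declspec(dllexport)
#  else
#    define C_API __declspec(dllimport)
#  endif
#else
#  define C_API __attribute__((visibility("default")))
#endif

With a macro of this shape, every decorated declaration (atg_sqrt, atg_zeros_out, and so on) lands in the shared library's export table, so foreign-function bindings can resolve the symbols by name.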
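
The declarations also make the calling convention explicit: each atg_* function writes its result through the leading tensor * out-parameter and returns an int status, zero on success and nonzero on error. A caller-side sketch; the input tensor, the buffer size, and the error handling here are assumptions for illustration only:

#include <stdio.h>
#include "torch_api.h"

void sqrt_example(tensor input) {
    tensor result = NULL;                  /* filled in by the call on success */
    if (atg_sqrt(&result, input) != 0) {   /* nonzero return signals an error */
        char err[512];                     /* caller-supplied buffer; size assumed */
        get_last_error(err);
        fprintf(stderr, "atg_sqrt failed: %s\n", err);
    }
}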