commit 40e2a747cf
parent 720dc28c09
Date: 2024-10-30 22:14:35 +01:00

    I am done

36901 changed files with 5011519 additions and 0 deletions

@@ -0,0 +1,38 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include "torch/csrc/autograd/VariableTypeUtils.h"
#include "torch/csrc/autograd/generated/ViewFuncs.h"
#include <torch/library.h>
#include <ATen/FunctionalInverses.h>
#include <ATen/FunctionalTensorWrapper.h>
// ${generated_comment}
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#else
$ops_headers
#endif
using namespace at;
using torch::autograd::CreationMeta;
using torch::autograd::as_view;
using torch::autograd::increment_version;
namespace torch {
namespace ADInplaceOrView {
namespace {
${inplace_or_view_method_definitions}
} // namespace
} // namespace ADInplaceOrView
namespace {
TORCH_LIBRARY_IMPL(aten, ADInplaceOrView, m) {
${inplace_or_view_wrapper_registrations};
}
} // namespace
} // namespace torch

@@ -0,0 +1,20 @@
#include "torch/csrc/autograd/FunctionsManual.h"
#include "torch/csrc/dynamo/compiled_autograd.h"
// ${generated_comment}
// The manual function definitions that used to be here are now in torch/csrc/autograd/FunctionsManual.cpp
// This speeds up re-compilation and allows these implementations to be shared,
// so that they can be used for forward mode AD formulas as well.
using namespace torch::autograd::generated::details;
using at::Tensor;
using at::Scalar;
using at::IntArrayRef;
using at::TensorList;
namespace torch::autograd::generated {
${autograd_function_definitions}
} // namespace torch::autograd::generated

@@ -0,0 +1,51 @@
#pragma once
// ${generated_comment}
#include <ATen/ATen.h>
#include <ATen/core/functional.h>
#include <ATen/TensorGeometry.h>
#include "torch/csrc/autograd/function.h"
#include "torch/csrc/autograd/variable.h"
#include "torch/csrc/autograd/saved_variable.h"
#include <torch/csrc/Export.h>
#include <c10/core/SymIntArrayRef.h>
namespace torch { namespace autograd { namespace generated {
using at::Scalar;
using at::Tensor;
using at::IntArrayRef;
using at::ArrayRef;
using at::Type;
using at::TensorGeometry;
using at::ScalarType;
using std::optional;
using c10::fmap;
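// Helpers used by the generated autograd Functions below: they unpack lists of
// SavedVariables back into (optional) Tensors when a backward Node executes.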
inline std::vector<Tensor> unpack_list(at::ArrayRef<SavedVariable> xs, std::shared_ptr<Node> saved_for = nullptr) {
// NB: we must explicitly do the conversion in the lambda, otherwise template
// deduction will give a Tensor of Variable which is not convertible
return fmap(xs, [&saved_for](const SavedVariable& x) {
// TODO(crcrpar): Use `std::move(saved_for)` to avoid incrementing refcount, which would need refactoring.
return static_cast<Tensor>(x.unpack(saved_for));
});
}
inline c10::List<std::optional<Tensor>> unpack_opt_list(at::ArrayRef<SavedVariable> xs, std::shared_ptr<Node> saved_for = nullptr) {
torch::List<std::optional<Tensor>> result;
result.reserve(xs.size());
for (const SavedVariable& v : xs) {
auto var = v.unpack(saved_for);
result.push_back(var.defined() ? std::optional<Tensor>(var) : ::std::nullopt);
}
return result;
}
using torch::autograd::TypeAndSize;
${autograd_function_declarations}
}}} // namespace torch::autograd::generated

@@ -0,0 +1,40 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
#include "torch/csrc/jit/frontend/tracer.h"
#include <torch/library.h>
#include "torch/csrc/autograd/function.h"
#include "ATen/quantized/Quantizer.h"
// ${generated_comment}
// See the `Tracer` section in `torch/csrc/jit/OVERVIEW.md`.
// NOTE: See [Sharded File] comment in VariableType
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#else
$ops_headers
#endif
using namespace at;
namespace torch {
namespace TraceType {
namespace {
${trace_method_definitions}
} // namespace
} // namespace TraceType
namespace {
TORCH_LIBRARY_IMPL(aten, Tracer, m) {
${trace_wrapper_registrations};
}
} // namespace
} // namespace torch

@@ -0,0 +1,65 @@
#include "torch/csrc/autograd/VariableTypeUtils.h"
#include "torch/csrc/autograd/generated/VariableType.h"
#include "torch/csrc/autograd/FunctionsManual.h"
#include <ATen/RedispatchFunctions.h>
#include <c10/core/impl/TorchDispatchModeTLS.h>
#include <ATen/core/TorchDispatchUtils.h>
#include <torch/library.h>
#include <ATen/SparseCsrTensorUtils.h>
// ${generated_comment}
// NOTE [Sharded File]: on this file's split-into-shards state
//
// Back in the good old days, VariableType.cpp was generated as one
// file with every function in it, and everything was great and
// simple.
//
// However, this file was also very large (over 36,000 lines), and
// compiling it was very slow, and in fact was a significant
// bottleneck for incremental rebuilds. To address this, we now
// generate the file split across multiple shards, named
// VariableType_0.cpp and so on, which can be compiled in parallel.
//
// For ease of inspection and debugging, so that it's not necessary to
// go rooting around in multiple files, we also generate all the
// functions together in VariableTypeEverything.cpp. This generated
// file is only for convenience; it's not actually used in the
// build. If the file you're looking at now is one of the shards, you
// may want to switch over to the Everything variant to make your
// grepping smoother.
using namespace at;
using namespace torch::autograd::generated;
using namespace torch::autograd::generated::details;
namespace torch::autograd {
namespace VariableType {
namespace {
C10_UNUSED void reset_grad_accumulator(Variable & self) {
AutogradMeta* meta = torch::autograd::impl::get_autograd_meta(self);
if (meta != nullptr) {
meta->grad_accumulator_.reset();
}
}
}
namespace {
${type_derived_method_definitions}
}
}
namespace {
${wrapper_registrations}
}
} // namespace torch::autograd

@@ -0,0 +1,59 @@
#pragma once
// ${generated_comment}
#include <ATen/core/Tensor.h>
#include <ATen/Context.h>
#include <c10/util/intrusive_ptr.h>
#include <torch/csrc/Export.h>
#include <torch/csrc/autograd/autograd_not_implemented_fallback.h>
#include <cstdint> // for size_t
#include <functional> // for function
#include <memory> // for unique_ptr
#include <string>
#include <vector>
namespace at {
struct Quantizer;
};
namespace torch { namespace autograd {
using Variable = at::Tensor;
using at::Context;
using at::Device;
using at::Dimname;
using at::DimnameList;
using at::Generator;
using at::IntArrayRef;
using at::MemoryFormat;
using at::QScheme;
using at::Scalar;
using at::ScalarType;
using at::Storage;
using at::Tensor;
using at::TensorList;
using at::TensorOptions;
using at::Quantizer;
// This is a temporary typedef to enable Quantizer in the aten native function API;
// we'll remove it once the Quantizer class is actually exposed
// to the frontend.
using ConstQuantizerPtr = const c10::intrusive_ptr<Quantizer>&;
using std::optional;
namespace VariableType {
TORCH_API std::vector<at::DeprecatedTypeProperties*> allCUDATypes();
TORCH_API std::vector<at::DeprecatedTypeProperties*> allXPUTypes();
TORCH_API std::vector<at::DeprecatedTypeProperties*> allCPUTypes();
TORCH_API std::vector<at::DeprecatedTypeProperties*> allPrivateUser1Types();
at::Tensor & unpack(Tensor & t, const char * name, int pos);
const at::Tensor & unpack(const Tensor & t, const char * name, int pos);
at::Tensor unpack_opt(const Tensor & t, const char * name, int pos);
std::vector<at::Tensor> unpack(const at::ITensorListRef& tl, const char *name, int pos);
};
}} // namespace torch::autograd

@@ -0,0 +1,14 @@
#include <torch/csrc/autograd/generated/ViewFuncs.h>
// ${generated_comment}
using at::Tensor;
using at::Scalar;
using at::IntArrayRef;
using at::TensorList;
namespace torch::autograd::generated {
${view_func_definitions}
} // namespace torch::autograd::generated

@@ -0,0 +1,28 @@
#pragma once
// ${generated_comment}
#include <torch/library.h>
#include <torch/csrc/autograd/variable.h>
#include <c10/core/SymIntArrayRef.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Operators.h>
#else
$ops_headers
#endif
namespace torch::autograd::generated {
using at::Scalar;
using at::Tensor;
using at::IntArrayRef;
using at::ArrayRef;
using at::Type;
using at::ScalarType;
using std::optional;
using c10::fmap;
${view_func_declarations}
} // namespace torch::autograd::generated

@@ -0,0 +1,11 @@
"""
This file is needed for generating procedural tests required for
testing __torch_function__. See tests/test_overrides.py.
"""
# flake8: noqa
import torch
annotated_args = {
${annotated_args}
}

@@ -0,0 +1,15 @@
#include <torch/csrc/autograd/python_enum_tag.h>
#include <torch/csrc/utils/pybind.h>
#include <pybind11/pybind11.h>
#include <ATen/core/enum_tag.h>
namespace py = pybind11;
namespace torch {
namespace autograd {
void initEnumTag(PyObject* module) {
auto m = py::handle(module).cast<py::module>();
py::enum_<at::Tag>(m, "Tag")
${enum_of_valid_tags};
m.doc() = "An Enum that contains tags that can be assigned to an operator registered in C++.";
}
}}

@@ -0,0 +1,81 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// ${generated_comment}
#include "torch/csrc/Device.h"
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/autograd/python_fft_functions.h"
#include "torch/csrc/autograd/generated/python_return_types.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/autograd/utils/python_arg_parsing.h"
#include "torch/csrc/autograd/generated/variable_factories.h"
#include "torch/csrc/utils/out_types.h"
#include "torch/csrc/utils/pycfunction_helpers.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/structseq.h"
#include "torch/csrc/utils/device_lazy_init.h"
#include <ATen/core/Tensor.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
$ops_headers
#endif
using at::Tensor;
using at::Device;
using at::Layout;
using at::Scalar;
using at::ScalarType;
using at::Backend;
using at::OptionalDeviceGuard;
using at::DeviceGuard;
using at::TensorOptions;
using at::IntArrayRef;
using at::Generator;
using at::TensorList;
using at::Dimname;
using at::DimnameList;
using torch::utils::check_out_type_matches;
using namespace torch::autograd::utils;
namespace torch::autograd {
// generated forward declarations start here
${py_forwards}
static PyMethodDef fft_functions[] = {
${py_method_defs}
{NULL}
};
static PyObject* THPFFTVariableFunctionsModule = NULL;
void initFFTFunctions(PyObject* module) {
static struct PyModuleDef def = {
PyModuleDef_HEAD_INIT,
"torch._C._fft",
NULL,
-1,
fft_functions
};
PyObject* fft = PyModule_Create(&def);
THPFFTVariableFunctionsModule = fft;
if (!fft) {
throw python_error();
}
// steals a reference to fft
if (PyModule_AddObject(module, "_fft", fft) != 0) {
throw python_error();
}
}
// generated methods start here
${py_methods}
} // namespace torch::autograd

@@ -0,0 +1,37 @@
#include <torch/csrc/autograd/generated/python_functions.h>
// ${generated_comment}
#include <Python.h>
#include <ATen/ATen.h>
#include <c10/core/SymNodeImpl.h>
#include "torch/csrc/autograd/generated/Functions.h"
#include "torch/csrc/autograd/python_cpp_function.h"
#include <torch/csrc/autograd/python_variable.h>
#include <torch/csrc/autograd/saved_variable.h>
#include <torch/csrc/utils/pybind.h>
#include <pybind11/pybind11.h>
#include <torch/csrc/utils/pybind.h>
// NOTE: See [Sharded File] comment in VariableType
namespace torch::autograd::generated {
template<typename C>
static void addClass(PyObject* module, PyTypeObject& type, const char* name,
PyGetSetDef* function_properties=NULL, PyMethodDef* function_methods=NULL)
{
_initFunctionPyTypeObject(type, name, function_properties, function_methods);
Py_INCREF(&type);
PyModule_AddObject(module, name, (PyObject*)&type);
registerCppFunction(typeid(C), &type);
}
${py_function_props_and_getters}
void initialize_autogenerated_functions${shard_id}(PyObject* module) {
${py_function_initializers}
}
} // namespace torch::autograd::generated

@@ -0,0 +1,17 @@
#pragma once
#include <Python.h>
// ${generated_comment}
// Python bindings for automatically generated autograd functions
namespace torch { namespace autograd { namespace generated {
${shard_forward_declare}
inline void initialize_autogenerated_functions(PyObject* module) {
${shard_call}
}
}}} // namespace torch::autograd::generated

@@ -0,0 +1,68 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// ${generated_comment}
#include "torch/csrc/Device.h"
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/autograd/python_linalg_functions.h"
#include "torch/csrc/autograd/generated/python_return_types.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/autograd/utils/python_arg_parsing.h"
#include "torch/csrc/utils/pycfunction_helpers.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/structseq.h"
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
$ops_headers
#endif
using at::Tensor;
using at::Scalar;
using at::ScalarType;
using at::MemoryFormat;
using at::Generator;
using at::IntArrayRef;
using at::TensorList;
using namespace torch::autograd::utils;
namespace torch::autograd {
// generated forward declarations start here
${py_forwards}
static PyMethodDef linalg_functions[] = {
${py_method_defs}
{NULL}
};
static PyObject* THPLinalgVariableFunctionsModule = NULL;
void initLinalgFunctions(PyObject* module) {
static struct PyModuleDef def = {
PyModuleDef_HEAD_INIT,
"torch._C._linalg",
NULL,
-1,
linalg_functions
};
PyObject* linalg = PyModule_Create(&def);
THPLinalgVariableFunctionsModule = linalg;
if (!linalg) {
throw python_error();
}
// steals a reference to linalg
if (PyModule_AddObject(module, "_linalg", linalg) != 0) {
throw python_error();
}
}
// generated methods start here
${py_methods}
} // namespace torch::autograd

@@ -0,0 +1,81 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// ${generated_comment}
#include "torch/csrc/Device.h"
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/autograd/python_nested_functions.h"
#include "torch/csrc/autograd/generated/python_return_types.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/autograd/utils/python_arg_parsing.h"
#include "torch/csrc/autograd/generated/variable_factories.h"
#include "torch/csrc/utils/out_types.h"
#include "torch/csrc/utils/pycfunction_helpers.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/structseq.h"
#include "torch/csrc/utils/device_lazy_init.h"
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
$ops_headers
#endif
using at::Tensor;
using at::Device;
using at::Layout;
using at::Scalar;
using at::ScalarType;
using at::Backend;
using at::OptionalDeviceGuard;
using at::DeviceGuard;
using at::TensorOptions;
using at::IntArrayRef;
using at::OptionalIntArrayRef;
using at::Generator;
using at::TensorList;
using at::Dimname;
using at::DimnameList;
using namespace torch::autograd::utils;
namespace torch::autograd {
// generated forward declarations start here
${py_forwards}
static PyMethodDef nested_functions[] = {
{NULL, NULL, 0, NULL},
${py_method_defs}
{NULL}
};
static PyObject* THPNestedVariableFunctionsModule = NULL;
void initNestedFunctions(PyObject* module) {
nested_functions[0] = get_nested_functions_manual()[0];
static struct PyModuleDef def = {
PyModuleDef_HEAD_INIT,
"torch._C._nested",
NULL,
-1,
nested_functions
};
PyObject* nested = PyModule_Create(&def);
THPNestedVariableFunctionsModule = nested;
if (!nested) {
throw python_error();
}
// steals a reference to nested
if (PyModule_AddObject(module, "_nested", nested) != 0) {
throw python_error();
}
}
// generated methods start here
${py_methods}
} // namespace torch::autograd

@@ -0,0 +1,113 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// ${generated_comment}
#include "torch/csrc/Device.h"
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/autograd/python_nn_functions.h"
#include "torch/csrc/autograd/generated/python_return_types.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/autograd/utils/python_arg_parsing.h"
#include "torch/csrc/utils/pycfunction_helpers.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/structseq.h"
#include "torch/csrc/utils/tensor_memoryformats.h"
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
$ops_headers
#endif
using at::Tensor;
using at::Scalar;
using at::MemoryFormat;
using at::Generator;
using at::IntArrayRef;
using at::ArrayRef;
using namespace torch::autograd::utils;
namespace torch::autograd {
static PyObject* THPNNVariableFunctionsModule = NULL;
static PyObject * THPVariable__parse_to(PyObject* module, PyObject* args, PyObject* kwargs)
{
HANDLE_TH_ERRORS
static PythonArgParser parser({
"to(Device device=None, ScalarType dtype=None, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)",
"to(ScalarType dtype, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)",
"to(Tensor tensor, bool non_blocking=False, bool copy=False, *, MemoryFormat? memory_format=None)",
});
ParsedArgs<5> parsed_args;
auto r = parser.parse(args, kwargs, parsed_args);
if (r.has_torch_function()) {
return handle_torch_function(r, args, kwargs, THPNNVariableFunctionsModule, "torch.nn", "_parse_to");
}
auto parsed = parse_to_conversion(r, /*allow_copy*/ false); // we don't want copy for nn.Module.to
auto& device = std::get<0>(parsed);
auto& scalarType = std::get<1>(parsed);
auto non_blocking = std::get<2>(parsed);
auto opt_memory_format = std::get<4>(parsed);
auto tuple = THPObjectPtr{PyTuple_New(4)};
if (!tuple) throw python_error();
if (device) {
PyTuple_SET_ITEM(tuple.get(), 0, THPDevice_New(*device));
} else {
Py_INCREF(Py_None);
PyTuple_SET_ITEM(tuple.get(), 0, Py_None);
}
if (scalarType) {
PyTuple_SET_ITEM(tuple.get(), 1, Py_NewRef(torch::getTHPDtype(*scalarType)));
} else {
Py_INCREF(Py_None);
PyTuple_SET_ITEM(tuple.get(), 1, Py_None);
}
PyTuple_SET_ITEM(tuple.get(), 2, torch::autograd::utils::wrap(non_blocking));
if (opt_memory_format.has_value()) {
PyTuple_SET_ITEM(tuple.get(), 3, Py_NewRef(torch::utils::getTHPMemoryFormat(opt_memory_format.value())));
} else {
Py_INCREF(Py_None);
PyTuple_SET_ITEM(tuple.get(), 3, Py_None);
}
return tuple.release();
END_HANDLE_TH_ERRORS
}
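// Illustrative only (not part of the generated file): `_parse_to` backs
// `nn.Module.to` and returns a (device, dtype, non_blocking, memory_format)
// 4-tuple. For example, calling from Python
//   torch._C._nn._parse_to(dtype=torch.float64)
// would be expected to yield (None, torch.float64, False, None).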
// generated forward declarations start here
${py_forwards}
static PyMethodDef nn_functions[] = {
{"_parse_to", castPyCFunctionWithKeywords(THPVariable__parse_to),
METH_VARARGS | METH_KEYWORDS, nullptr},
${py_method_defs}
{NULL}
};
void initNNFunctions(PyObject* module) {
static struct PyModuleDef def = {
PyModuleDef_HEAD_INIT,
"torch._C._nn",
NULL,
-1,
nn_functions
};
PyObject* nn = PyModule_Create(&def);
THPNNVariableFunctionsModule = nn;
if (!nn) {
throw python_error();
}
// steals a reference to nn
if (PyModule_AddObject(module, "_nn", nn) != 0) {
throw python_error();
}
}
// generated methods start here
${py_methods}
} // namespace torch::autograd

@@ -0,0 +1,52 @@
#include <Python.h>
#include <vector>
#include <map>
#include <string>
#include "torch/csrc/autograd/generated/python_return_types.h"
#include "torch/csrc/utils/structseq.h"
#include "torch/csrc/Exceptions.h"
namespace torch { namespace autograd { namespace generated {
${py_return_types}
}}}
namespace torch::autograd {
static void addReturnType(
PyObject* module,
const char* name,
PyTypeObject* type) {
// Hold onto the TypeObject for the unlikely case that a user
// deletes or overrides it.
Py_INCREF(type);
if (PyModule_AddObject(
module,
name,
(PyObject*)type) != 0) {
Py_DECREF(type);
throw python_error();
}
}
void initReturnTypes(PyObject* module) {
static struct PyModuleDef def = {
PyModuleDef_HEAD_INIT, "torch._C._return_types", nullptr, -1, {}};
PyObject* return_types_module = PyModule_Create(&def);
if (!return_types_module) {
throw python_error();
}
${py_return_types_registrations}
// steals a reference to return_types on success
if (PyModule_AddObject(module, "_return_types", return_types_module) != 0) {
Py_DECREF(return_types_module);
throw python_error();
}
}
} // namespace torch::autograd

@@ -0,0 +1,14 @@
#pragma once
namespace torch {
namespace autograd {
namespace generated {
${py_return_types_declarations}
}
void initReturnTypes(PyObject* module);
} // namespace autograd
} // namespace torch

@@ -0,0 +1,67 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// ${generated_comment}
#include "torch/csrc/Device.h"
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/autograd/python_sparse_functions.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/autograd/utils/python_arg_parsing.h"
#include "torch/csrc/utils/pycfunction_helpers.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/structseq.h"
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
$ops_headers
#endif
using at::Tensor;
using at::Scalar;
using at::ScalarType;
using at::MemoryFormat;
using at::Generator;
using at::IntArrayRef;
using at::TensorList;
using namespace torch::autograd::utils;
namespace torch::autograd {
// generated forward declarations start here
${py_forwards}
static PyMethodDef sparse_functions[] = {
${py_method_defs}
{NULL}
};
static PyObject* THPSparseVariableFunctionsModule = NULL;
void initSparseFunctions(PyObject* module) {
static struct PyModuleDef def = {
PyModuleDef_HEAD_INIT,
"torch._C._sparse",
NULL,
-1,
sparse_functions
};
PyObject* sparse = PyModule_Create(&def);
THPSparseVariableFunctionsModule = sparse;
if (!sparse) {
throw python_error();
}
// steals a reference to sparse
if (PyModule_AddObject(module, "_sparse", sparse) != 0) {
throw python_error();
}
}
// generated methods start here
${py_methods}
} // namespace torch::autograd

@@ -0,0 +1,79 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// ${generated_comment}
#include "torch/csrc/Device.h"
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/autograd/python_special_functions.h"
#include "torch/csrc/autograd/generated/python_return_types.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/autograd/utils/python_arg_parsing.h"
#include "torch/csrc/autograd/generated/variable_factories.h"
#include "torch/csrc/utils/out_types.h"
#include "torch/csrc/utils/pycfunction_helpers.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/structseq.h"
#include "torch/csrc/utils/device_lazy_init.h"
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
$ops_headers
#endif
using at::Tensor;
using at::Device;
using at::Layout;
using at::Scalar;
using at::ScalarType;
using at::Backend;
using at::OptionalDeviceGuard;
using at::DeviceGuard;
using at::TensorOptions;
using at::IntArrayRef;
using at::Generator;
using at::TensorList;
using at::Dimname;
using at::DimnameList;
using torch::utils::check_out_type_matches;
using namespace torch::autograd::utils;
namespace torch::autograd {
// generated forward declarations start here
${py_forwards}
static PyMethodDef special_functions[] = {
${py_method_defs}
{NULL}
};
static PyObject* THPSpecialVariableFunctionsModule = NULL;
void initSpecialFunctions(PyObject* module) {
static struct PyModuleDef def = {
PyModuleDef_HEAD_INIT,
"torch._C._special",
NULL,
-1,
special_functions
};
PyObject* special = PyModule_Create(&def);
THPSpecialVariableFunctionsModule = special;
if (!special) {
throw python_error();
}
// steals a reference to special
if (PyModule_AddObject(module, "_special", special) != 0) {
throw python_error();
}
}
// generated methods start here
${py_methods}
} // namespace torch::autograd

@@ -0,0 +1,93 @@
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS
// ${generated_comment}
// Python bindings for torch.* functions implemented through ATen.
//
// The functions are bound as static methods on a class
// torch._C._VariableFunctions which is also aliased as Variable._torch
// and also copied into the 'torch' module.
#include <Python.h>
// Undefine the copysign macro so that at::copysign works as intended with MSVC
// https://github.com/python/cpython/blob/c60394c7fc9cc09b16e9675a3eeb5844b6d8523f/PC/pyconfig.h#L196
#ifdef _MSC_VER
#undef copysign
#endif // _MSC_VER
#include "torch/csrc/autograd/python_torch_functions.h"
#include "torch/csrc/autograd/python_variable.h"
#include "torch/csrc/autograd/utils/wrap_outputs.h"
#include "torch/csrc/Dtype.h"
#include "torch/csrc/DynamicTypes.h"
#include "torch/csrc/Exceptions.h"
#include "torch/csrc/utils/out_types.h"
#include "torch/csrc/utils/pybind.h"
#include "torch/csrc/utils/pycfunction_helpers.h"
#include "torch/csrc/utils/python_arg_parser.h"
#include "torch/csrc/utils/tensor_layouts.h"
#include "torch/csrc/utils/tensor_new.h"
#include "torch/csrc/utils/tensor_numpy.h"
#include "torch/csrc/jit/frontend/tracer.h"
#include "torch/csrc/autograd/generated/variable_factories.h"
#include "torch/csrc/utils/structseq.h"
#include "torch/csrc/utils/device_lazy_init.h"
#include "torch/csrc/autograd/generated/python_return_types.h"
#include <ATen/core/Tensor.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
$ops_headers
#endif
#include <functional>
#include <initializer_list>
#include <stdexcept>
#include <utility>
using at::Tensor;
using at::Device;
using at::Layout;
using at::Scalar;
using at::ScalarType;
using at::Backend;
using at::OptionalDeviceGuard;
using at::DeviceGuard;
using at::TensorOptions;
using at::IntArrayRef;
using at::Generator;
using at::TensorList;
using at::Dimname;
using at::DimnameList;
using at::ArrayRef;
using torch::utils::check_out_type_matches;
using namespace torch::autograd::utils;
// NOTE: See [Sharded File] comment in VariableType
namespace torch::autograd {
// generated forward declarations start here
${py_forwards}
static PyMethodDef torch_functions_shard[] = {
${py_method_defs}
};
void gatherTorchFunctions${shard_id}(std::vector<PyMethodDef> &torch_functions) {
constexpr size_t num_functions = sizeof(torch_functions_shard) / sizeof(torch_functions_shard[0]);
torch_functions.insert(
torch_functions.end(),
torch_functions_shard,
torch_functions_shard + num_functions);
}
// generated methods start here
${py_methods}
} // namespace torch::autograd

@@ -0,0 +1,135 @@
#pragma once
// ${generated_comment}
#include <ATen/core/Tensor.h>
#include <ATen/TracerMode.h>
#include <ATen/core/grad_mode.h>
#include <c10/util/ArrayRef.h>
#include <c10/core/MemoryFormat.h>
#include <torch/csrc/api/include/torch/detail/TensorDataContainer.h>
#include <torch/csrc/autograd/variable.h>
#ifndef AT_PER_OPERATOR_HEADERS
#include <ATen/Functions.h>
#else
#include <ATen/ops/from_blob.h>
$ops_headers
#endif
#include <functional>
#include <initializer_list>
#include <utility>
namespace torch {
/// NOTE: Currently `torch::tensor(...)` doesn't support mixed data types
/// (e.g. `torch::tensor({{bool, 2.0}})` doesn't work). We might be able to
/// support it in the future by iterating over all sub-lists to find
/// the largest data type that can represent all of the elements, or by using
/// variadic templates.
///
/// NOTE: C++ `torch::tensor` with a floating-point type or an `at::ArrayRef` / `std::vector` /
/// (nested) braced-init-list of floating-point types always produces a tensor of dtype
/// `torch::get_default_dtype()`, matching Python `torch.tensor` behavior.
///
/// NOTE: C++ `torch::tensor` with an integer type or an `at::ArrayRef` / `std::vector` /
/// (nested) braced-init-list of integer types always produces a tensor of dtype `at::kLong`
/// (aka. int64_t), matching Python `torch.tensor` behavior.
///
/// NOTE: The following dtypes are not supported by `torch::tensor` currently:
/// - `unsigned int`
/// - `unsigned long int`
/// - `unsigned long long int`
/// - `long long int`
inline at::Tensor tensor(detail::TensorDataContainer tensor_data_container, const at::TensorOptions& options = {}) {
return autograd::make_variable(
// note: we remove the requires_grad setting from the TensorOptions because
// it is ignored anyway (and we actually have an assertion that it isn't set
// which would fail otherwise). We handle requires_grad explicitly here
// instead of passing it through to the kernel.
tensor_data_container.convert_to_tensor(options.requires_grad(::std::nullopt)),
options.requires_grad());
}
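// Illustrative only (not part of the generated header): a minimal sketch of the
// dtype rules described in the notes above, assuming the default dtype is kFloat.
//
//   auto a = torch::tensor({1.0, 2.0, 3.0});  // dtype: torch::get_default_dtype() (kFloat)
//   auto b = torch::tensor({1, 2, 3});        // dtype: at::kLong
//   auto c = torch::tensor({true, false});    // dtype: at::kBool
//   auto d = torch::tensor({1.5, 2.5}, at::dtype(at::kDouble).requires_grad(true));
//   // d.dtype() == at::kDouble, d.requires_grad() == true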
/// A generic deleter function.
using Deleter = std::function<void(void*)>;
using at::MemoryFormat;
/// Exposes the given `data` as a `Tensor` without taking ownership of the
/// original data. `sizes` should specify the shape of the tensor, `strides` the
/// stride in each dimension. The `deleter` function (a
/// `std::function<void(void*)>`) will be called on the `data` when the Tensor
/// data would normally be deallocated. The `TensorOptions` specify additional
/// configuration options for the returned tensor, such as what type to
/// interpret the `data` as.
inline at::Tensor from_blob(
void* data,
at::IntArrayRef sizes,
at::IntArrayRef strides,
const Deleter& deleter,
const at::TensorOptions& options = at::TensorOptions()) {
at::Tensor tensor = ([&]() {
at::AutoDispatchBelowAutograd guard; // TODO: remove
at::tracer::impl::NoTracerDispatchMode tracer_guard;
return at::from_blob(data, sizes, strides, deleter, options.requires_grad(::std::nullopt));
})();
return autograd::make_variable(tensor, options.requires_grad());
}
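// Illustrative only (not part of the generated header): typical usage of the
// overload above, wrapping caller-allocated memory without copying it and
// releasing it via the deleter once the tensor's storage is no longer referenced.
//
//   float* buf = new float[6]{1, 2, 3, 4, 5, 6};
//   auto t = torch::from_blob(
//       buf, /*sizes=*/{2, 3}, /*strides=*/{3, 1},
//       [](void* p) { delete[] static_cast<float*>(p); },
//       torch::dtype(torch::kFloat));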
/// Exposes the given `data` as a `Tensor` without taking ownership of the
/// original data. `sizes` should specify the shape of the tensor, `strides` the
/// stride in each dimension. The `TensorOptions`
/// specify additional configuration options for the returned tensor, such as
/// what type to interpret the `data` as.
inline at::Tensor from_blob(
void* data,
at::IntArrayRef sizes,
at::IntArrayRef strides,
const at::TensorOptions& options = at::TensorOptions()) {
at::Tensor tensor = ([&]() {
at::AutoDispatchBelowAutograd guard; // TODO: remove
at::tracer::impl::NoTracerDispatchMode tracer_guard;
return at::from_blob(data, sizes, strides, options.requires_grad(::std::nullopt));
})();
return autograd::make_variable(tensor, options.requires_grad());
}
/// Exposes the given `data` as a `Tensor` without taking ownership of the
/// original data. `sizes` should specify the shape of the tensor. The `deleter`
/// function (a `std::function<void(void*)>`) will be called on the `data` when
/// the Tensor data would normally be deallocated. The `TensorOptions` specify
/// additional configuration options for the returned tensor, such as what type
/// to interpret the `data` as.
inline at::Tensor from_blob(
void* data,
at::IntArrayRef sizes,
const Deleter& deleter,
const at::TensorOptions& options = at::TensorOptions()) {
at::Tensor tensor = ([&]() {
at::AutoDispatchBelowAutograd guard; // TODO: remove
at::tracer::impl::NoTracerDispatchMode tracer_guard;
return at::from_blob(data, sizes, deleter, options.requires_grad(::std::nullopt));
})();
return autograd::make_variable(tensor, options.requires_grad());
}
/// Exposes the given `data` as a `Tensor` without taking ownership of the
/// original data. `sizes` should specify the shape of the tensor. The
/// `TensorOptions` specify additional configuration options for the returned
/// tensor, such as what type to interpret the `data` as.
inline at::Tensor from_blob(
void* data,
at::IntArrayRef sizes,
const at::TensorOptions& options = at::TensorOptions()) {
at::Tensor tensor = ([&]() {
at::AutoDispatchBelowAutograd guard; // TODO: remove
at::tracer::impl::NoTracerDispatchMode tracer_guard;
return at::from_blob(data, sizes, options.requires_grad(::std::nullopt));
})();
return autograd::make_variable(tensor, options.requires_grad());
}
${function_definitions}
} // namespace torch