#pragma once
#include <ATen/Operators.h>
#include <ATen/functorch/PlumbingHelper.h>

namespace at { namespace functorch {
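
// NOTE: [Generated vmap plumbing]
// Every wrapper below follows the same shape:
//   1. exclude the FuncTorchBatched dispatch key and look up the current
//      dynamic layer (the active vmap level);
//   2. if no Tensor argument is batched at that level, fall through to the
//      regular ATen op via at::_ops::<op>::call;
//   3. otherwise unwrap each batched Tensor into a (value, bdim) pair,
//      invoke the supplied batch_rule, and re-wrap the results with
//      makeBatched / makeBatchedVector at the current level.
// In-place variants return `self` after calling the batch rule, void
// variants skip the re-wrapping step, and optional Tensor arguments are
// unwrapped only when present.
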
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Byte_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Byte::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Char_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Char::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Double_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Double::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Float_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Float::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Int_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Int::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Long_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Long::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Short_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Short::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cast_Half_generated_plumbing(const at::Tensor & self, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_cast_Half::call(self, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
void _backward_generated_plumbing(const at::Tensor & self, at::TensorList inputs, const c10::optional<at::Tensor> & gradient, c10::optional<bool> retain_graph, bool create_graph) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(inputs, cur_level) && !isBatchedAtLevel(gradient, cur_level)) {
    return at::_ops::_backward::call(self, inputs, gradient, retain_graph, create_graph);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> gradient_value;
  optional<int64_t> gradient_bdim;
  if (gradient) {
    std::tie(gradient_value, gradient_bdim) = unwrapTensorAtLevel(gradient.value(), cur_level);
  }
  batch_rule(self_value, self_bdim, inputs, gradient_value, gradient_bdim, retain_graph, create_graph);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
void set_data_generated_plumbing(at::Tensor & self, const at::Tensor & new_data) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(new_data, cur_level)) {
    return at::_ops::set_data::call(self, new_data);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor new_data_value;
  optional<int64_t> new_data_bdim;
  std::tie(new_data_value, new_data_bdim) = unwrapTensorAtLevel(new_data, cur_level);
  batch_rule(self_value, self_bdim, new_data_value, new_data_bdim);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor data_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::data::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & requires_grad__generated_plumbing(at::Tensor & self, bool requires_grad) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::requires_grad_::call(self, requires_grad);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, requires_grad);
  return self;
}

template <typename batch_rule_t, batch_rule_t batch_rule>
void retain_grad_generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::retain_grad::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _fw_primal_generated_plumbing(const at::Tensor & self, int64_t level) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_fw_primal::call(self, level);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, level);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _make_dual_generated_plumbing(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(primal, cur_level) && !isBatchedAtLevel(tangent, cur_level)) {
    return at::_ops::_make_dual::call(primal, tangent, level);
  }
  Tensor primal_value;
  optional<int64_t> primal_bdim;
  std::tie(primal_value, primal_bdim) = unwrapTensorAtLevel(primal, cur_level);
  Tensor tangent_value;
  optional<int64_t> tangent_bdim;
  std::tie(tangent_value, tangent_bdim) = unwrapTensorAtLevel(tangent, cur_level);
  auto results = batch_rule(primal_value, primal_bdim, tangent_value, tangent_bdim, level);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _unpack_dual_generated_plumbing(const at::Tensor & dual, int64_t level) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(dual, cur_level)) {
    return at::_ops::_unpack_dual::call(dual, level);
  }
  Tensor dual_value;
  optional<int64_t> dual_bdim;
  std::tie(dual_value, dual_bdim) = unwrapTensorAtLevel(dual, cur_level);
  auto results = batch_rule(dual_value, dual_bdim, level);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _new_zeros_with_same_feature_meta_generated_plumbing(const at::Tensor & self, const at::Tensor & other, int64_t self_num_batch_dims) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_new_zeros_with_same_feature_meta::call(self, other, self_num_batch_dims);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, self_num_batch_dims);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rename_generated_plumbing(const at::Tensor & self, c10::optional<at::DimnameList> names) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rename::call(self, names);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, names);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor align_to_generated_plumbing(const at::Tensor & self, at::DimnameList names) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::align_to::call(self, names);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, names);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor align_to_ellipsis_idx_generated_plumbing(const at::Tensor & self, at::DimnameList order, int64_t ellipsis_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::align_to_ellipsis_idx::call(self, order, ellipsis_idx);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, order, ellipsis_idx);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor align_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::align_as::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> align_tensors_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::align_tensors::call(tensors);
  }

  auto results = batch_rule(tensors);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
void _assert_async_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_assert_async::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
void _assert_async_msg_generated_plumbing(const at::Tensor & self, c10::string_view assert_msg) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_assert_async_msg::call(self, assert_msg);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, assert_msg);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _functional_assert_scalar_generated_plumbing(const at::Scalar & self, c10::string_view assert_msg, const at::Tensor & dep_token) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(dep_token, cur_level)) {
    return at::_ops::_functional_assert_scalar::call(self, assert_msg, dep_token);
  }
  Tensor dep_token_value;
  optional<int64_t> dep_token_bdim;
  std::tie(dep_token_value, dep_token_bdim) = unwrapTensorAtLevel(dep_token, cur_level);
  auto results = batch_rule(self, assert_msg, dep_token_value, dep_token_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _functional_assert_async_msg_generated_plumbing(const at::Tensor & self, c10::string_view assert_msg, const at::Tensor & dep_token) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dep_token, cur_level)) {
    return at::_ops::_functional_assert_async_msg::call(self, assert_msg, dep_token);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor dep_token_value;
  optional<int64_t> dep_token_bdim;
  std::tie(dep_token_value, dep_token_bdim) = unwrapTensorAtLevel(dep_token, cur_level);
  auto results = batch_rule(self_value, self_bdim, assert_msg, dep_token_value, dep_token_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
void _assert_tensor_metadata_generated_plumbing(const at::Tensor & a, at::OptionalSymIntArrayRef size, at::OptionalSymIntArrayRef stride, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(a, cur_level)) {
    return at::_ops::_assert_tensor_metadata::call(a, size, stride, dtype);
  }
  Tensor a_value;
  optional<int64_t> a_bdim;
  std::tie(a_value, a_bdim) = unwrapTensorAtLevel(a, cur_level);
  batch_rule(a_value, a_bdim, size, stride, dtype);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _functional_sym_constrain_range_generated_plumbing(const at::Scalar & size, c10::optional<int64_t> min, c10::optional<int64_t> max, const at::Tensor & dep_token) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(dep_token, cur_level)) {
    return at::_ops::_functional_sym_constrain_range::call(size, min, max, dep_token);
  }
  Tensor dep_token_value;
  optional<int64_t> dep_token_bdim;
  std::tie(dep_token_value, dep_token_bdim) = unwrapTensorAtLevel(dep_token, cur_level);
  auto results = batch_rule(size, min, max, dep_token_value, dep_token_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _functional_sym_constrain_range_for_size_generated_plumbing(const at::Scalar & size, c10::optional<int64_t> min, c10::optional<int64_t> max, const at::Tensor & dep_token) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(dep_token, cur_level)) {
    return at::_ops::_functional_sym_constrain_range_for_size::call(size, min, max, dep_token);
  }
  Tensor dep_token_value;
  optional<int64_t> dep_token_bdim;
  std::tie(dep_token_value, dep_token_bdim) = unwrapTensorAtLevel(dep_token, cur_level);
  auto results = batch_rule(size, min, max, dep_token_value, dep_token_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor refine_names_generated_plumbing(const at::Tensor & self, at::DimnameList names) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::refine_names::call(self, names);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, names);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
    return at::_ops::_cudnn_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
  }
  Tensor log_probs_value;
  optional<int64_t> log_probs_bdim;
  std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
  Tensor targets_value;
  optional<int64_t> targets_bdim;
  std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
  auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, deterministic, zero_infinity);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}

template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _cudnn_ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool deterministic, bool zero_infinity) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
    return at::_ops::_cudnn_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, deterministic, zero_infinity);
  }
  Tensor log_probs_value;
  optional<int64_t> log_probs_bdim;
  std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
  Tensor targets_value;
  optional<int64_t> targets_bdim;
  std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
  Tensor input_lengths_value;
  optional<int64_t> input_lengths_bdim;
  std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level);
  Tensor target_lengths_value;
  optional<int64_t> target_lengths_bdim;
  std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level);
  auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, deterministic, zero_infinity);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cudnn_rnn_flatten_weight_generated_plumbing(at::TensorList weight_arr, int64_t weight_stride0, c10::SymInt input_size, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, bool bidirectional) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight_arr, cur_level)) {
    return at::_ops::_cudnn_rnn_flatten_weight::call(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
  }

  auto results = batch_rule(weight_arr, weight_stride0, input_size, mode, hidden_size, proj_size, num_layers, batch_first, bidirectional);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _cudnn_rnn_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const c10::optional<at::Tensor> & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(dropout_state, cur_level)) {
    return at::_ops::_cudnn_rnn::call(input, weight, weight_stride0, weight_buf, hx, cx, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  optional<Tensor> weight_buf_value;
  optional<int64_t> weight_buf_bdim;
  if (weight_buf) {
    std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf.value(), cur_level);
  }
  optional<Tensor> cx_value;
  optional<int64_t> cx_bdim;
  if (cx) {
    std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  }
  optional<Tensor> dropout_state_value;
  optional<int64_t> dropout_state_bdim;
  if (dropout_state) {
    std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
}

template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> _cudnn_rnn_backward_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level)) {
    return at::_ops::_cudnn_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_buf_value;
  optional<int64_t> weight_buf_bdim;
  std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  Tensor output_value;
  optional<int64_t> output_bdim;
  std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  Tensor reserve_value;
  optional<int64_t> reserve_bdim;
  std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level);
  optional<Tensor> cx_value;
  optional<int64_t> cx_bdim;
  if (cx) {
    std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  }
  optional<Tensor> grad_output_value;
  optional<int64_t> grad_output_bdim;
  if (grad_output) {
    std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
  }
  optional<Tensor> grad_hy_value;
  optional<int64_t> grad_hy_bdim;
  if (grad_hy) {
    std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  }
  optional<Tensor> grad_cy_value;
  optional<int64_t> grad_cy_bdim;
  if (grad_cy) {
    std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  }
  optional<Tensor> dropout_state_value;
  optional<int64_t> dropout_state_bdim;
  if (dropout_state) {
    std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level));
}

template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _fused_dropout_generated_plumbing(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_fused_dropout::call(self, p, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p, generator);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _masked_scale_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, double scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::_masked_scale::call(self, mask, scale);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, scale);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> native_dropout_generated_plumbing(const at::Tensor & input, double p, c10::optional<bool> train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::native_dropout::call(input, p, train);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, p, train);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor native_dropout_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & mask, double scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::native_dropout_backward::call(grad_output, mask, scale);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, mask_value, mask_bdim, scale);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _sobol_engine_draw_generated_plumbing(const at::Tensor & quasi, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(quasi, cur_level) && !isBatchedAtLevel(sobolstate, cur_level)) {
    return at::_ops::_sobol_engine_draw::call(quasi, n, sobolstate, dimension, num_generated, dtype);
  }
  Tensor quasi_value;
  optional<int64_t> quasi_bdim;
  std::tie(quasi_value, quasi_bdim) = unwrapTensorAtLevel(quasi, cur_level);
  Tensor sobolstate_value;
  optional<int64_t> sobolstate_bdim;
  std::tie(sobolstate_value, sobolstate_bdim) = unwrapTensorAtLevel(sobolstate, cur_level);
  auto results = batch_rule(quasi_value, quasi_bdim, n, sobolstate_value, sobolstate_bdim, dimension, num_generated, dtype);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & _sobol_engine_ff__generated_plumbing(at::Tensor & self, int64_t n, const at::Tensor & sobolstate, int64_t dimension, int64_t num_generated) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(sobolstate, cur_level)) {
    return at::_ops::_sobol_engine_ff_::call(self, n, sobolstate, dimension, num_generated);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor sobolstate_value;
  optional<int64_t> sobolstate_bdim;
  std::tie(sobolstate_value, sobolstate_bdim) = unwrapTensorAtLevel(sobolstate, cur_level);
  batch_rule(self_value, self_bdim, n, sobolstate_value, sobolstate_bdim, dimension, num_generated);
  return self;
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & _sobol_engine_scramble__generated_plumbing(at::Tensor & self, const at::Tensor & ltm, int64_t dimension) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(ltm, cur_level)) {
    return at::_ops::_sobol_engine_scramble_::call(self, ltm, dimension);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor ltm_value;
  optional<int64_t> ltm_bdim;
  std::tie(ltm_value, ltm_bdim) = unwrapTensorAtLevel(ltm, cur_level);
  batch_rule(self_value, self_bdim, ltm_value, ltm_bdim, dimension);
  return self;
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & _sobol_engine_initialize_state__generated_plumbing(at::Tensor & self, int64_t dimension) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_sobol_engine_initialize_state_::call(self, dimension);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, dimension);
  return self;
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _reshape_from_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & shape) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(shape, cur_level)) {
    return at::_ops::_reshape_from_tensor::call(self, shape);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor shape_value;
  optional<int64_t> shape_bdim;
  std::tie(shape_value, shape_bdim) = unwrapTensorAtLevel(shape, cur_level);
  auto results = batch_rule(self_value, self_bdim, shape_value, shape_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _shape_as_tensor_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_shape_as_tensor::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::dropout::call(input, p, train);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, p, train);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::dropout_::call(self, p, train);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, p, train);
  return self;
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor feature_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::feature_dropout::call(input, p, train);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, p, train);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & feature_dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::feature_dropout_::call(self, p, train);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, p, train);
  return self;
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor alpha_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::alpha_dropout::call(input, p, train);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, p, train);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & alpha_dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::alpha_dropout_::call(self, p, train);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, p, train);
  return self;
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor feature_alpha_dropout_generated_plumbing(const at::Tensor & input, double p, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::feature_alpha_dropout::call(input, p, train);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, p, train);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & feature_alpha_dropout__generated_plumbing(at::Tensor & self, double p, bool train) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::feature_alpha_dropout_::call(self, p, train);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, p, train);
  return self;
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor abs_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::abs::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
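
// Illustrative sketch only (not the registration functorch actually performs,
// which is driven by its batch-rule macros elsewhere): a unary batch rule such
// as the hypothetical
//   std::tuple<Tensor, optional<int64_t>> my_abs_batch_rule(const Tensor& self,
//                                                           optional<int64_t> self_bdim);
// could be routed through the wrapper above inside a
// TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) block with
//   m.impl("abs", abs_generated_plumbing<decltype(&my_abs_batch_rule), &my_abs_batch_rule>);
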
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & abs__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::abs_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor absolute_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::absolute::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & absolute__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::absolute_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor angle_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::angle::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor view_as_real_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::view_as_real::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor view_as_complex_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::view_as_complex::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sgn_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sgn::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & sgn__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sgn_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}

template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor chalf_generated_plumbing(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::chalf::call(self, memory_format);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor real_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::real::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor imag_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::imag::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _conj_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_conj::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conj_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::conj::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _conj_physical_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_conj_physical::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conj_physical_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::conj_physical::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & conj_physical__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::conj_physical_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor resolve_conj_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::resolve_conj::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor resolve_neg_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::resolve_neg::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _neg_view_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_neg_view::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor acos_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::acos::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & acos__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::acos_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor arccos_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arccos::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & arccos__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::arccos_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor avg_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::avg_pool1d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor adaptive_avg_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::adaptive_avg_pool1d::call(self, output_size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
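// Ops returning multiple tensors (e.g. adaptive_max_pool1d below) use a batch
// rule that yields a (value, batch-dim) pair per output; each output is
// re-wrapped with makeBatched and packed back into a tuple.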
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::adaptive_max_pool1d::call(self, output_size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, output_size);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
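// Ops with several tensor arguments (add.Tensor and friends below) take the
// unbatched fast path only when none of their tensor arguments is batched at
// the current level; otherwise each tensor argument is unwrapped separately
// and its value/batch-dim pair is forwarded to the batch rule.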
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor add_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::add_Tensor::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & add__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::add__Tensor::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _add_relu_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_add_relu_Tensor::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & _add_relu__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::_add_relu__Tensor::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _add_relu_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_add_relu_Scalar::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & _add_relu__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_add_relu__Scalar::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor add_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::add_Scalar::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & add__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::add__Scalar::call(self, other, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor addmv_generated_plumbing(const at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat, cur_level) && !isBatchedAtLevel(vec, cur_level)) {
    return at::_ops::addmv::call(self, mat, vec, beta, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mat_value;
  optional<int64_t> mat_bdim;
  std::tie(mat_value, mat_bdim) = unwrapTensorAtLevel(mat, cur_level);
  Tensor vec_value;
  optional<int64_t> vec_bdim;
  std::tie(vec_value, vec_bdim) = unwrapTensorAtLevel(vec, cur_level);
  auto results = batch_rule(self_value, self_bdim, mat_value, mat_bdim, vec_value, vec_bdim, beta, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & addmv__generated_plumbing(at::Tensor & self, const at::Tensor & mat, const at::Tensor & vec, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat, cur_level) && !isBatchedAtLevel(vec, cur_level)) {
    return at::_ops::addmv_::call(self, mat, vec, beta, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mat_value;
  optional<int64_t> mat_bdim;
  std::tie(mat_value, mat_bdim) = unwrapTensorAtLevel(mat, cur_level);
  Tensor vec_value;
  optional<int64_t> vec_bdim;
  std::tie(vec_value, vec_bdim) = unwrapTensorAtLevel(vec, cur_level);
  batch_rule(self_value, self_bdim, mat_value, mat_bdim, vec_value, vec_bdim, beta, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor addr_generated_plumbing(const at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec1, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
    return at::_ops::addr::call(self, vec1, vec2, beta, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor vec1_value;
  optional<int64_t> vec1_bdim;
  std::tie(vec1_value, vec1_bdim) = unwrapTensorAtLevel(vec1, cur_level);
  Tensor vec2_value;
  optional<int64_t> vec2_bdim;
  std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level);
  auto results = batch_rule(self_value, self_bdim, vec1_value, vec1_bdim, vec2_value, vec2_bdim, beta, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & addr__generated_plumbing(at::Tensor & self, const at::Tensor & vec1, const at::Tensor & vec2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec1, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
    return at::_ops::addr_::call(self, vec1, vec2, beta, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor vec1_value;
  optional<int64_t> vec1_bdim;
  std::tie(vec1_value, vec1_bdim) = unwrapTensorAtLevel(vec1, cur_level);
  Tensor vec2_value;
  optional<int64_t> vec2_bdim;
  std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level);
  batch_rule(self_value, self_bdim, vec1_value, vec1_bdim, vec2_value, vec2_bdim, beta, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor affine_grid_generator_generated_plumbing(const at::Tensor & theta, c10::SymIntArrayRef size, bool align_corners) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(theta, cur_level)) {
|
|
return at::_ops::affine_grid_generator::call(theta, size, align_corners);
|
|
}
|
|
Tensor theta_value;
|
|
optional<int64_t> theta_bdim;
|
|
std::tie(theta_value, theta_bdim) = unwrapTensorAtLevel(theta, cur_level);
|
|
auto results = batch_rule(theta_value, theta_bdim, size, align_corners);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor affine_grid_generator_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef size, bool align_corners) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level)) {
|
|
return at::_ops::affine_grid_generator_backward::call(grad, size, align_corners);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, size, align_corners);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _is_all_true_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_is_all_true::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _is_any_true_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_is_any_true::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _test_check_tensor_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_test_check_tensor::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _test_functorch_fallback_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_test_functorch_fallback::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor all_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::all_dim::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor all_dims_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::all_dims::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor all_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::all_dimname::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor any_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::any_dim::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor any_dims_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::any_dims::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor any_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::any_dimname::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _dim_arange_generated_plumbing(const at::Tensor & like, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(like, cur_level)) {
|
|
return at::_ops::_dim_arange::call(like, dim);
|
|
}
|
|
Tensor like_value;
|
|
optional<int64_t> like_bdim;
|
|
std::tie(like_value, like_bdim) = unwrapTensorAtLevel(like, cur_level);
|
|
auto results = batch_rule(like_value, like_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor argmax_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::argmax::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor argmin_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::argmin::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor acosh_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::acosh::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & acosh__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::acosh_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor arccosh_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::arccosh::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & arccosh__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::arccosh_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor asinh_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::asinh::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & asinh__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::asinh_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor arcsinh_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::arcsinh::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & arcsinh__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::arcsinh_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor atanh_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::atanh::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & atanh__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::atanh_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor arctanh_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::arctanh::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & arctanh__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::arctanh_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor as_strided_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::as_strided::call(self, size, stride, storage_offset);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, size, stride, storage_offset);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor asin_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::asin::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & asin__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::asin_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor arcsin_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::arcsin::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & arcsin__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::arcsin_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor atan_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::atan::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & atan__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::atan_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor arctan_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::arctan::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & arctan__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::arctan_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor atleast_1d_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::atleast_1d::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> atleast_1d_Sequence_generated_plumbing(at::TensorList tensors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::atleast_1d_Sequence::call(tensors);
|
|
}
|
|
|
|
auto results = batch_rule(tensors);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor atleast_2d_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::atleast_2d::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> atleast_2d_Sequence_generated_plumbing(at::TensorList tensors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::atleast_2d_Sequence::call(tensors);
|
|
}
|
|
|
|
auto results = batch_rule(tensors);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor atleast_3d_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::atleast_3d::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> atleast_3d_Sequence_generated_plumbing(at::TensorList tensors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::atleast_3d_Sequence::call(tensors);
|
|
}
|
|
|
|
auto results = batch_rule(tensors);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor baddbmm_generated_plumbing(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
|
|
return at::_ops::baddbmm::call(self, batch1, batch2, beta, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor batch1_value;
|
|
optional<int64_t> batch1_bdim;
|
|
std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level);
|
|
Tensor batch2_value;
|
|
optional<int64_t> batch2_bdim;
|
|
std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & baddbmm__generated_plumbing(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
|
|
return at::_ops::baddbmm_::call(self, batch1, batch2, beta, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor batch1_value;
|
|
optional<int64_t> batch1_bdim;
|
|
std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level);
|
|
Tensor batch2_value;
|
|
optional<int64_t> batch2_bdim;
|
|
std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level);
|
|
batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor batch_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps, bool cudnn_enabled) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
|
|
return at::_ops::batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps, cudnn_enabled);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
optional<Tensor> running_mean_value;
|
|
optional<int64_t> running_mean_bdim;
|
|
if (running_mean) {
|
|
std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
|
|
}
|
|
optional<Tensor> running_var_value;
|
|
optional<int64_t> running_var_bdim;
|
|
if (running_var) {
|
|
std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps, cudnn_enabled);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor quantized_batch_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & var, double eps, double output_scale, int64_t output_zero_point) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(var, cur_level)) {
|
|
return at::_ops::quantized_batch_norm::call(input, weight, bias, mean, var, eps, output_scale, output_zero_point);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor mean_value;
|
|
optional<int64_t> mean_bdim;
|
|
std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
|
|
Tensor var_value;
|
|
optional<int64_t> var_bdim;
|
|
std::tie(var_value, var_bdim) = unwrapTensorAtLevel(var, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, mean_value, mean_bdim, var_value, var_bdim, eps, output_scale, output_zero_point);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _batch_norm_impl_index_backward_generated_plumbing(int64_t impl_index, const at::Tensor & input, const at::Tensor & grad_output, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var_transform, bool train, double eps, ::std::array<bool,3> output_mask, const at::Tensor & reservedSpace) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var_transform, cur_level) && !isBatchedAtLevel(reservedSpace, cur_level)) {
|
|
return at::_ops::_batch_norm_impl_index_backward::call(impl_index, input, grad_output, weight, running_mean, running_var, save_mean, save_var_transform, train, eps, output_mask, reservedSpace);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor reservedSpace_value;
|
|
optional<int64_t> reservedSpace_bdim;
|
|
std::tie(reservedSpace_value, reservedSpace_bdim) = unwrapTensorAtLevel(reservedSpace, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
optional<Tensor> running_mean_value;
|
|
optional<int64_t> running_mean_bdim;
|
|
if (running_mean) {
|
|
std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
|
|
}
|
|
optional<Tensor> running_var_value;
|
|
optional<int64_t> running_var_bdim;
|
|
if (running_var) {
|
|
std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
|
|
}
|
|
optional<Tensor> save_mean_value;
|
|
optional<int64_t> save_mean_bdim;
|
|
if (save_mean) {
|
|
std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
|
|
}
|
|
optional<Tensor> save_var_transform_value;
|
|
optional<int64_t> save_var_transform_bdim;
|
|
if (save_var_transform) {
|
|
std::tie(save_var_transform_value, save_var_transform_bdim) = unwrapTensorAtLevel(save_var_transform.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(impl_index, input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_transform_value, save_var_transform_bdim, train, eps, output_mask, reservedSpace_value, reservedSpace_bdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor bernoulli_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::bernoulli::call(self, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, generator);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & bernoulli__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(p, cur_level)) {
|
|
return at::_ops::bernoulli__Tensor::call(self, p, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor p_value;
|
|
optional<int64_t> p_bdim;
|
|
std::tie(p_value, p_bdim) = unwrapTensorAtLevel(p, cur_level);
|
|
batch_rule(self_value, self_bdim, p_value, p_bdim, generator);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & bernoulli__float_generated_plumbing(at::Tensor & self, double p, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::bernoulli__float::call(self, p, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, p, generator);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor bernoulli_p_generated_plumbing(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::bernoulli_p::call(self, p, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, p, generator);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor bilinear_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::bilinear::call(input1, input2, weight, bias);
|
|
}
|
|
Tensor input1_value;
|
|
optional<int64_t> input1_bdim;
|
|
std::tie(input1_value, input1_bdim) = unwrapTensorAtLevel(input1, cur_level);
|
|
Tensor input2_value;
|
|
optional<int64_t> input2_bdim;
|
|
std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, weight_value, weight_bdim, bias_value, bias_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor binary_cross_entropy_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::binary_cross_entropy::call(self, target, weight, reduction);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor binary_cross_entropy_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::binary_cross_entropy_backward::call(grad_output, self, target, weight, reduction);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor binary_cross_entropy_with_logits_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & pos_weight, int64_t reduction) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(pos_weight, cur_level)) {
|
|
return at::_ops::binary_cross_entropy_with_logits::call(self, target, weight, pos_weight, reduction);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
optional<Tensor> pos_weight_value;
|
|
optional<int64_t> pos_weight_bdim;
|
|
if (pos_weight) {
|
|
std::tie(pos_weight_value, pos_weight_bdim) = unwrapTensorAtLevel(pos_weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, pos_weight_value, pos_weight_bdim, reduction);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor bincount_generated_plumbing(const at::Tensor & self, const c10::optional<at::Tensor> & weights, int64_t minlength) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weights, cur_level)) {
|
|
return at::_ops::bincount::call(self, weights, minlength);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
optional<Tensor> weights_value;
|
|
optional<int64_t> weights_bdim;
|
|
if (weights) {
|
|
std::tie(weights_value, weights_bdim) = unwrapTensorAtLevel(weights.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weights_value, weights_bdim, minlength);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor bitwise_not_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::bitwise_not::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & bitwise_not__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::bitwise_not_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor copysign_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::copysign_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & copysign__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::copysign__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor copysign_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::copysign_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & copysign__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::copysign__Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _lazy_clone_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_lazy_clone::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor logical_not_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::logical_not::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & logical_not__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::logical_not_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor logical_xor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::logical_xor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & logical_xor__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::logical_xor_::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor logical_and_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::logical_and::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & logical_and__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::logical_and_::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor logical_or_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::logical_or::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & logical_or__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::logical_or_::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor bmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
|
|
return at::_ops::bmm::call(self, mat2);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor mat2_value;
|
|
optional<int64_t> mat2_bdim;
|
|
std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> broadcast_tensors_generated_plumbing(at::TensorList tensors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::broadcast_tensors::call(tensors);
|
|
}
|
|
|
|
auto results = batch_rule(tensors);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor broadcast_to_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::broadcast_to::call(self, size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_broadcast_to_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_broadcast_to::call(self, size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cat_generated_plumbing(const at::ITensorListRef & tensors, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::cat::call(tensors, dim);
|
|
}
|
|
|
|
auto results = batch_rule(tensors, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cat_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::cat_names::call(tensors, dim);
|
|
}
|
|
|
|
auto results = batch_rule(tensors, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor concat_generated_plumbing(at::TensorList tensors, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::concat::call(tensors, dim);
|
|
}
|
|
|
|
auto results = batch_rule(tensors, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor concat_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::concat_names::call(tensors, dim);
|
|
}
|
|
|
|
auto results = batch_rule(tensors, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor concatenate_generated_plumbing(at::TensorList tensors, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::concatenate::call(tensors, dim);
|
|
}
|
|
|
|
auto results = batch_rule(tensors, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor concatenate_names_generated_plumbing(at::TensorList tensors, at::Dimname dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::concatenate_names::call(tensors, dim);
|
|
}
|
|
|
|
auto results = batch_rule(tensors, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor block_diag_generated_plumbing(at::TensorList tensors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::block_diag::call(tensors);
|
|
}
|
|
|
|
auto results = batch_rule(tensors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor ceil_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::ceil::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & ceil__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::ceil_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor chain_matmul_generated_plumbing(at::TensorList matrices) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(matrices, cur_level)) {
|
|
return at::_ops::chain_matmul::call(matrices);
|
|
}
|
|
|
|
auto results = batch_rule(matrices);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> unsafe_chunk_generated_plumbing(const at::Tensor & self, int64_t chunks, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::unsafe_chunk::call(self, chunks, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, chunks, dim);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> chunk_generated_plumbing(const at::Tensor & self, int64_t chunks, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::chunk::call(self, chunks, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, chunks, dim);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> tensor_split_sections_generated_plumbing(const at::Tensor & self, c10::SymInt sections, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::tensor_split_sections::call(self, sections, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, sections, dim);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> tensor_split_indices_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::tensor_split_indices::call(self, indices, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, indices, dim);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> tensor_split_tensor_indices_or_sections_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor_indices_or_sections, cur_level)) {
|
|
return at::_ops::tensor_split_tensor_indices_or_sections::call(self, tensor_indices_or_sections, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor tensor_indices_or_sections_value;
|
|
optional<int64_t> tensor_indices_or_sections_bdim;
|
|
std::tie(tensor_indices_or_sections_value, tensor_indices_or_sections_bdim) = unwrapTensorAtLevel(tensor_indices_or_sections, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, tensor_indices_or_sections_value, tensor_indices_or_sections_bdim, dim);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor clamp_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::clamp::call(self, min, max);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, min, max);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor clamp_Tensor_generated_plumbing(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
|
|
return at::_ops::clamp_Tensor::call(self, min, max);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
optional<Tensor> min_value;
|
|
optional<int64_t> min_bdim;
|
|
if (min) {
|
|
std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
|
|
}
|
|
optional<Tensor> max_value;
|
|
optional<int64_t> max_bdim;
|
|
if (max) {
|
|
std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & clamp__generated_plumbing(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::clamp_::call(self, min, max);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, min, max);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & clamp__Tensor_generated_plumbing(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
|
|
return at::_ops::clamp__Tensor::call(self, min, max);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
optional<Tensor> min_value;
|
|
optional<int64_t> min_bdim;
|
|
if (min) {
|
|
std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
|
|
}
|
|
optional<Tensor> max_value;
|
|
optional<int64_t> max_bdim;
|
|
if (max) {
|
|
std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
|
|
}
|
|
batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor clamp_max_generated_plumbing(const at::Tensor & self, const at::Scalar & max) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::clamp_max::call(self, max);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, max);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor clamp_max_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & max) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(max, cur_level)) {
|
|
return at::_ops::clamp_max_Tensor::call(self, max);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor max_value;
|
|
optional<int64_t> max_bdim;
|
|
std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, max_value, max_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clamp_max__generated_plumbing(at::Tensor & self, const at::Scalar & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clamp_max_::call(self, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, max);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clamp_max__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(max, cur_level)) {
    return at::_ops::clamp_max__Tensor::call(self, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor max_value;
  optional<int64_t> max_bdim;
  std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max, cur_level);
  batch_rule(self_value, self_bdim, max_value, max_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clamp_min_generated_plumbing(const at::Tensor & self, const at::Scalar & min) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clamp_min::call(self, min);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, min);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clamp_min_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & min) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level)) {
    return at::_ops::clamp_min_Tensor::call(self, min);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor min_value;
  optional<int64_t> min_bdim;
  std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min, cur_level);
  auto results = batch_rule(self_value, self_bdim, min_value, min_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clamp_min__generated_plumbing(at::Tensor & self, const at::Scalar & min) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clamp_min_::call(self, min);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, min);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clamp_min__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & min) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level)) {
    return at::_ops::clamp_min__Tensor::call(self, min);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor min_value;
  optional<int64_t> min_bdim;
  std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min, cur_level);
  batch_rule(self_value, self_bdim, min_value, min_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clip_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clip::call(self, min, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, min, max);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor clip_Tensor_generated_plumbing(const at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
    return at::_ops::clip_Tensor::call(self, min, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> min_value;
  optional<int64_t> min_bdim;
  if (min) {
    std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
  }
  optional<Tensor> max_value;
  optional<int64_t> max_bdim;
  if (max) {
    std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clip__generated_plumbing(at::Tensor & self, const c10::optional<at::Scalar> & min, const c10::optional<at::Scalar> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::clip_::call(self, min, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, min, max);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & clip__Tensor_generated_plumbing(at::Tensor & self, const c10::optional<at::Tensor> & min, const c10::optional<at::Tensor> & max) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(min, cur_level) && !isBatchedAtLevel(max, cur_level)) {
    return at::_ops::clip__Tensor::call(self, min, max);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> min_value;
  optional<int64_t> min_bdim;
  if (min) {
    std::tie(min_value, min_bdim) = unwrapTensorAtLevel(min.value(), cur_level);
  }
  optional<Tensor> max_value;
  optional<int64_t> max_bdim;
  if (max) {
    std::tie(max_value, max_bdim) = unwrapTensorAtLevel(max.value(), cur_level);
  }
  batch_rule(self_value, self_bdim, min_value, min_bdim, max_value, max_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor complex_generated_plumbing(const at::Tensor & real, const at::Tensor & imag) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(real, cur_level) && !isBatchedAtLevel(imag, cur_level)) {
    return at::_ops::complex::call(real, imag);
  }
  Tensor real_value;
  optional<int64_t> real_bdim;
  std::tie(real_value, real_bdim) = unwrapTensorAtLevel(real, cur_level);
  Tensor imag_value;
  optional<int64_t> imag_bdim;
  std::tie(imag_value, imag_bdim) = unwrapTensorAtLevel(imag, cur_level);
  auto results = batch_rule(real_value, real_bdim, imag_value, imag_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor polar_generated_plumbing(const at::Tensor & abs, const at::Tensor & angle) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(abs, cur_level) && !isBatchedAtLevel(angle, cur_level)) {
    return at::_ops::polar::call(abs, angle);
  }
  Tensor abs_value;
  optional<int64_t> abs_bdim;
  std::tie(abs_value, abs_bdim) = unwrapTensorAtLevel(abs, cur_level);
  Tensor angle_value;
  optional<int64_t> angle_bdim;
  std::tie(angle_value, angle_bdim) = unwrapTensorAtLevel(angle, cur_level);
  auto results = batch_rule(abs_value, abs_bdim, angle_value, angle_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor constant_pad_nd_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::constant_pad_nd::call(self, pad, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, pad, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor contiguous_generated_plumbing(const at::Tensor & self, at::MemoryFormat memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::contiguous::call(self, memory_format);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, at::OptionalSymIntArrayRef bias_sizes, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::convolution_backward::call(grad_output, input, weight, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_sizes, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor convolution_overrideable_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::convolution_overrideable::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> convolution_backward_overrideable_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::convolution_backward_overrideable::call(grad_output, input, weight, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::_convolution::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled, allow_tf32);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _convolution_deprecated_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, at::IntArrayRef output_padding, c10::SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::_convolution_deprecated::call(input, weight, bias, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, transposed, output_padding, groups, benchmark, deterministic, cudnn_enabled);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _convolution_mode_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::_convolution_mode::call(input, weight, bias, stride, padding, dilation, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _convolution_double_backward_generated_plumbing(const c10::optional<at::Tensor> & ggI, const c10::optional<at::Tensor> & ggW, const c10::optional<at::Tensor> & ggb, const at::Tensor & gO, const at::Tensor & weight, const at::Tensor & self, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, bool transposed, c10::SymIntArrayRef output_padding, c10::SymInt groups, ::std::array<bool,3> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(ggI, cur_level) && !isBatchedAtLevel(ggW, cur_level) && !isBatchedAtLevel(ggb, cur_level) && !isBatchedAtLevel(gO, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_convolution_double_backward::call(ggI, ggW, ggb, gO, weight, self, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  }
  Tensor gO_value;
  optional<int64_t> gO_bdim;
  std::tie(gO_value, gO_bdim) = unwrapTensorAtLevel(gO, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> ggI_value;
  optional<int64_t> ggI_bdim;
  if (ggI) {
    std::tie(ggI_value, ggI_bdim) = unwrapTensorAtLevel(ggI.value(), cur_level);
  }
  optional<Tensor> ggW_value;
  optional<int64_t> ggW_bdim;
  if (ggW) {
    std::tie(ggW_value, ggW_bdim) = unwrapTensorAtLevel(ggW.value(), cur_level);
  }
  optional<Tensor> ggb_value;
  optional<int64_t> ggb_bdim;
  if (ggb) {
    std::tie(ggb_value, ggb_bdim) = unwrapTensorAtLevel(ggb.value(), cur_level);
  }
  auto results = batch_rule(ggI_value, ggI_bdim, ggW_value, ggW_bdim, ggb_value, ggb_bdim, gO_value, gO_bdim, weight_value, weight_bdim, self_value, self_bdim, stride, padding, dilation, transposed, output_padding, groups, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv1d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv1d::call(input, weight, bias, stride, padding, dilation, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv2d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv2d::call(input, weight, bias, stride, padding, dilation, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv3d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv3d::call(input, weight, bias, stride, padding, dilation, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv1d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv1d_padding::call(input, weight, bias, stride, padding, dilation, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv2d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv2d_padding::call(input, weight, bias, stride, padding, dilation, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv3d_padding_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv3d_padding::call(input, weight, bias, stride, padding, dilation, groups);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv_tbc_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv_tbc::call(self, weight, bias, pad);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor bias_value;
  optional<int64_t> bias_bdim;
  std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, pad);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> conv_tbc_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & input, const at::Tensor & weight, const at::Tensor & bias, int64_t pad) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv_tbc_backward::call(self, input, weight, bias, pad);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor bias_value;
  optional<int64_t> bias_bdim;
  std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
  auto results = batch_rule(self_value, self_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, pad);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv_transpose1d_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv_transpose1d::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv_transpose2d_input_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv_transpose2d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor conv_transpose3d_input_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymInt groups, c10::SymIntArrayRef dilation) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::conv_transpose3d_input::call(input, weight, bias, stride, padding, output_padding, groups, dilation);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, output_padding, groups, dilation);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor copy_generated_plumbing(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::copy::call(self, src, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & copy__generated_plumbing(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::copy_::call(self, src, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _copy_from_generated_plumbing(const at::Tensor & self, const at::Tensor & dst, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dst, cur_level)) {
    return at::_ops::_copy_from::call(self, dst, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor dst_value;
  optional<int64_t> dst_bdim;
  std::tie(dst_value, dst_bdim) = unwrapTensorAtLevel(dst, cur_level);
  auto results = batch_rule(self_value, self_bdim, dst_value, dst_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _copy_from_and_resize_generated_plumbing(const at::Tensor & self, const at::Tensor & dst) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(dst, cur_level)) {
    return at::_ops::_copy_from_and_resize::call(self, dst);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor dst_value;
  optional<int64_t> dst_bdim;
  std::tie(dst_value, dst_bdim) = unwrapTensorAtLevel(dst, cur_level);
  auto results = batch_rule(self_value, self_bdim, dst_value, dst_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cos_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cos::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & cos__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cos_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cosh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cosh::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & cosh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cosh_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cosine_embedding_loss_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::cosine_embedding_loss::call(input1, input2, target, margin, reduction);
  }
  Tensor input1_value;
  optional<int64_t> input1_bdim;
  std::tie(input1_value, input1_bdim) = unwrapTensorAtLevel(input1, cur_level);
  Tensor input2_value;
  optional<int64_t> input2_bdim;
  std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, target_value, target_bdim, margin, reduction);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor count_nonzero_dim_IntList_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::count_nonzero_dim_IntList::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor count_nonzero_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::count_nonzero::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cov_generated_plumbing(const at::Tensor & self, int64_t correction, const c10::optional<at::Tensor> & fweights, const c10::optional<at::Tensor> & aweights) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(fweights, cur_level) && !isBatchedAtLevel(aweights, cur_level)) {
    return at::_ops::cov::call(self, correction, fweights, aweights);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> fweights_value;
  optional<int64_t> fweights_bdim;
  if (fweights) {
    std::tie(fweights_value, fweights_bdim) = unwrapTensorAtLevel(fweights.value(), cur_level);
  }
  optional<Tensor> aweights_value;
  optional<int64_t> aweights_bdim;
  if (aweights) {
    std::tie(aweights_value, aweights_bdim) = unwrapTensorAtLevel(aweights.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, correction, fweights_value, fweights_bdim, aweights_value, aweights_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor corrcoef_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::corrcoef::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cudnn_affine_grid_generator_generated_plumbing(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(theta, cur_level)) {
    return at::_ops::cudnn_affine_grid_generator::call(theta, N, C, H, W);
  }
  Tensor theta_value;
  optional<int64_t> theta_bdim;
  std::tie(theta_value, theta_bdim) = unwrapTensorAtLevel(theta, cur_level);
  auto results = batch_rule(theta_value, theta_bdim, N, C, H, W);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cudnn_affine_grid_generator_backward_generated_plumbing(const at::Tensor & grad, int64_t N, int64_t C, int64_t H, int64_t W) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level)) {
    return at::_ops::cudnn_affine_grid_generator_backward::call(grad, N, C, H, W);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, N, C, H, W);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
    return at::_ops::cudnn_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  optional<Tensor> running_mean_value;
  optional<int64_t> running_mean_bdim;
  if (running_mean) {
    std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  optional<Tensor> running_var_value;
  optional<int64_t> running_var_bdim;
  if (running_var) {
    std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, exponential_average_factor, epsilon);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> cudnn_batch_norm_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon, const at::Tensor & reserveSpace) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var, cur_level) && !isBatchedAtLevel(reserveSpace, cur_level)) {
    return at::_ops::cudnn_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon, reserveSpace);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor reserveSpace_value;
  optional<int64_t> reserveSpace_bdim;
  std::tie(reserveSpace_value, reserveSpace_bdim) = unwrapTensorAtLevel(reserveSpace, cur_level);
  optional<Tensor> running_mean_value;
  optional<int64_t> running_mean_bdim;
  if (running_mean) {
    std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  optional<Tensor> running_var_value;
  optional<int64_t> running_var_bdim;
  if (running_var) {
    std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  optional<Tensor> save_mean_value;
  optional<int64_t> save_mean_bdim;
  if (save_mean) {
    std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
  }
  optional<Tensor> save_var_value;
  optional<int64_t> save_var_bdim;
  if (save_var) {
    std::tie(save_var_value, save_var_bdim) = unwrapTensorAtLevel(save_var.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_value, save_var_bdim, epsilon, reserveSpace_value, reserveSpace_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cudnn_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::cudnn_convolution::call(self, weight, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cudnn_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, bool allow_tf32) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::cudnn_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups, benchmark, deterministic, allow_tf32);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _mps_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::_mps_convolution_transpose::call(self, weight, padding, output_padding, stride, dilation, groups);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> mps_convolution_transpose_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,2> output_mask) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::mps_convolution_transpose_backward::call(self, grad_output, weight, padding, output_padding, stride, dilation, groups, output_mask);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, padding, output_padding, stride, dilation, groups, output_mask);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cudnn_convolution_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::cudnn_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cudnn_convolution_add_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(z, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::cudnn_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
Tensor z_value;
|
|
optional<int64_t> z_bdim;
|
|
std::tie(z_value, z_bdim) = unwrapTensorAtLevel(z, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, z_value, z_bdim, alpha, bias_value, bias_bdim, stride, padding, dilation, groups);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cudnn_grid_sampler_generated_plumbing(const at::Tensor & self, const at::Tensor & grid) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
|
|
return at::_ops::cudnn_grid_sampler::call(self, grid);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor grid_value;
|
|
optional<int64_t> grid_bdim;
|
|
std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, grid_value, grid_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> cudnn_grid_sampler_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grid, const at::Tensor & grad_output) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grid, cur_level) && !isBatchedAtLevel(grad_output, cur_level)) {
|
|
return at::_ops::cudnn_grid_sampler_backward::call(self, grid, grad_output);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor grid_value;
|
|
optional<int64_t> grid_bdim;
|
|
std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, grid_value, grid_bdim, grad_output_value, grad_output_bdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> cummax_generated_plumbing(const at::Tensor & self, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::cummax::call(self, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> cummax_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::cummax_dimname::call(self, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _cummax_helper_generated_plumbing(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
|
|
return at::_ops::_cummax_helper::call(self, values, indices, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
batch_rule(self_value, self_bdim, values_value, values_bdim, indices_value, indices_bdim, dim);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> cummin_generated_plumbing(const at::Tensor & self, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::cummin::call(self, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> cummin_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::cummin_dimname::call(self, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _cummin_helper_generated_plumbing(const at::Tensor & self, at::Tensor & values, at::Tensor & indices, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
|
|
return at::_ops::_cummin_helper::call(self, values, indices, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
batch_rule(self_value, self_bdim, values_value, values_bdim, indices_value, indices_bdim, dim);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cummaxmin_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & indices, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
|
|
return at::_ops::cummaxmin_backward::call(grad, input, indices, dim);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, indices_value, indices_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cumprod_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::cumprod::call(self, dim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & cumprod__generated_plumbing(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::cumprod_::call(self, dim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, dim, dtype);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cumprod_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::cumprod_dimname::call(self, dim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & cumprod__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::cumprod__dimname::call(self, dim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, dim, dtype);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cumprod_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(output, cur_level)) {
|
|
return at::_ops::cumprod_backward::call(grad, input, dim, output);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor output_value;
|
|
optional<int64_t> output_bdim;
|
|
std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, dim, output_value, output_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cumsum_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::cumsum::call(self, dim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & cumsum__generated_plumbing(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::cumsum_::call(self, dim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, dim, dtype);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cumsum_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::cumsum_dimname::call(self, dim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & cumsum__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::cumsum__dimname::call(self, dim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, dim, dtype);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cumulative_trapezoid_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) {
|
|
return at::_ops::cumulative_trapezoid_x::call(y, x, dim);
|
|
}
|
|
Tensor y_value;
|
|
optional<int64_t> y_bdim;
|
|
std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
|
|
Tensor x_value;
|
|
optional<int64_t> x_bdim;
|
|
std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
|
|
auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cumulative_trapezoid_dx_generated_plumbing(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(y, cur_level)) {
|
|
return at::_ops::cumulative_trapezoid_dx::call(y, dx, dim);
|
|
}
|
|
Tensor y_value;
|
|
optional<int64_t> y_bdim;
|
|
std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
|
|
auto results = batch_rule(y_value, y_bdim, dx, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor ctc_loss_IntList_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
|
|
return at::_ops::ctc_loss_IntList::call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
|
|
}
|
|
Tensor log_probs_value;
|
|
optional<int64_t> log_probs_bdim;
|
|
std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
|
|
Tensor targets_value;
|
|
optional<int64_t> targets_bdim;
|
|
std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
|
|
auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, reduction, zero_infinity);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, int64_t reduction, bool zero_infinity) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
|
|
return at::_ops::ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, reduction, zero_infinity);
|
|
}
|
|
Tensor log_probs_value;
|
|
optional<int64_t> log_probs_bdim;
|
|
std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
|
|
Tensor targets_value;
|
|
optional<int64_t> targets_bdim;
|
|
std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
|
|
Tensor input_lengths_value;
|
|
optional<int64_t> input_lengths_bdim;
|
|
std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level);
|
|
Tensor target_lengths_value;
|
|
optional<int64_t> target_lengths_bdim;
|
|
std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level);
|
|
auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, reduction, zero_infinity);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _ctc_loss_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank, bool zero_infinity) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level)) {
|
|
return at::_ops::_ctc_loss::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
|
|
}
|
|
Tensor log_probs_value;
|
|
optional<int64_t> log_probs_bdim;
|
|
std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
|
|
Tensor targets_value;
|
|
optional<int64_t> targets_bdim;
|
|
std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
|
|
auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, blank, zero_infinity);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _ctc_loss_Tensor_generated_plumbing(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank, bool zero_infinity) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level)) {
|
|
return at::_ops::_ctc_loss_Tensor::call(log_probs, targets, input_lengths, target_lengths, blank, zero_infinity);
|
|
}
|
|
Tensor log_probs_value;
|
|
optional<int64_t> log_probs_bdim;
|
|
std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
|
|
Tensor targets_value;
|
|
optional<int64_t> targets_bdim;
|
|
std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
|
|
Tensor input_lengths_value;
|
|
optional<int64_t> input_lengths_bdim;
|
|
std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level);
|
|
Tensor target_lengths_value;
|
|
optional<int64_t> target_lengths_bdim;
|
|
std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level);
|
|
auto results = batch_rule(log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, blank, zero_infinity);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _ctc_loss_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(neg_log_likelihood, cur_level) && !isBatchedAtLevel(log_alpha, cur_level)) {
|
|
return at::_ops::_ctc_loss_backward::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor log_probs_value;
|
|
optional<int64_t> log_probs_bdim;
|
|
std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
|
|
Tensor targets_value;
|
|
optional<int64_t> targets_bdim;
|
|
std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
|
|
Tensor neg_log_likelihood_value;
|
|
optional<int64_t> neg_log_likelihood_bdim;
|
|
std::tie(neg_log_likelihood_value, neg_log_likelihood_bdim) = unwrapTensorAtLevel(neg_log_likelihood, cur_level);
|
|
Tensor log_alpha_value;
|
|
optional<int64_t> log_alpha_bdim;
|
|
std::tie(log_alpha_value, log_alpha_bdim) = unwrapTensorAtLevel(log_alpha, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths, target_lengths, neg_log_likelihood_value, neg_log_likelihood_bdim, log_alpha_value, log_alpha_bdim, blank, zero_infinity);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _ctc_loss_backward_Tensor_generated_plumbing(const at::Tensor & grad, const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, const at::Tensor & neg_log_likelihood, const at::Tensor & log_alpha, int64_t blank, bool zero_infinity) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(log_probs, cur_level) && !isBatchedAtLevel(targets, cur_level) && !isBatchedAtLevel(input_lengths, cur_level) && !isBatchedAtLevel(target_lengths, cur_level) && !isBatchedAtLevel(neg_log_likelihood, cur_level) && !isBatchedAtLevel(log_alpha, cur_level)) {
|
|
return at::_ops::_ctc_loss_backward_Tensor::call(grad, log_probs, targets, input_lengths, target_lengths, neg_log_likelihood, log_alpha, blank, zero_infinity);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor log_probs_value;
|
|
optional<int64_t> log_probs_bdim;
|
|
std::tie(log_probs_value, log_probs_bdim) = unwrapTensorAtLevel(log_probs, cur_level);
|
|
Tensor targets_value;
|
|
optional<int64_t> targets_bdim;
|
|
std::tie(targets_value, targets_bdim) = unwrapTensorAtLevel(targets, cur_level);
|
|
Tensor input_lengths_value;
|
|
optional<int64_t> input_lengths_bdim;
|
|
std::tie(input_lengths_value, input_lengths_bdim) = unwrapTensorAtLevel(input_lengths, cur_level);
|
|
Tensor target_lengths_value;
|
|
optional<int64_t> target_lengths_bdim;
|
|
std::tie(target_lengths_value, target_lengths_bdim) = unwrapTensorAtLevel(target_lengths, cur_level);
|
|
Tensor neg_log_likelihood_value;
|
|
optional<int64_t> neg_log_likelihood_bdim;
|
|
std::tie(neg_log_likelihood_value, neg_log_likelihood_bdim) = unwrapTensorAtLevel(neg_log_likelihood, cur_level);
|
|
Tensor log_alpha_value;
|
|
optional<int64_t> log_alpha_bdim;
|
|
std::tie(log_alpha_value, log_alpha_bdim) = unwrapTensorAtLevel(log_alpha, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, log_probs_value, log_probs_bdim, targets_value, targets_bdim, input_lengths_value, input_lengths_bdim, target_lengths_value, target_lengths_bdim, neg_log_likelihood_value, neg_log_likelihood_bdim, log_alpha_value, log_alpha_bdim, blank, zero_infinity);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor diag_embed_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::diag_embed::call(self, offset, dim1, dim2);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor diagflat_generated_plumbing(const at::Tensor & self, int64_t offset) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::diagflat::call(self, offset);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, offset);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor diagonal_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::diagonal::call(self, offset, dim1, dim2);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_diagonal_generated_plumbing(const at::Tensor & A, int64_t offset, int64_t dim1, int64_t dim2) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::linalg_diagonal::call(A, offset, dim1, dim2);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim, offset, dim1, dim2);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor diagonal_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::diagonal_Dimname::call(self, outdim, dim1, dim2, offset);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, outdim, dim1, dim2, offset);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor diagonal_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t offset, int64_t dim1, int64_t dim2) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level)) {
|
|
return at::_ops::diagonal_backward::call(grad_output, input_sizes, offset, dim1, dim2);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, offset, dim1, dim2);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & fill_diagonal__generated_plumbing(at::Tensor & self, const at::Scalar & fill_value, bool wrap) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fill_diagonal_::call(self, fill_value, wrap);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, fill_value, wrap);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor diff_generated_plumbing(const at::Tensor & self, int64_t n, int64_t dim, const c10::optional<at::Tensor> & prepend, const c10::optional<at::Tensor> & append) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(prepend, cur_level) && !isBatchedAtLevel(append, cur_level)) {
|
|
return at::_ops::diff::call(self, n, dim, prepend, append);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
optional<Tensor> prepend_value;
|
|
optional<int64_t> prepend_bdim;
|
|
if (prepend) {
|
|
std::tie(prepend_value, prepend_bdim) = unwrapTensorAtLevel(prepend.value(), cur_level);
|
|
}
|
|
optional<Tensor> append_value;
|
|
optional<int64_t> append_bdim;
|
|
if (append) {
|
|
std::tie(append_value, append_bdim) = unwrapTensorAtLevel(append.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, n, dim, prepend_value, prepend_bdim, append_value, append_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> gradient_scalarint_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & spacing, c10::optional<int64_t> dim, int64_t edge_order) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::gradient_scalarint::call(self, spacing, dim, edge_order);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> gradient_scalararray_generated_plumbing(const at::Tensor & self, const at::Scalar & spacing, at::IntArrayRef dim, int64_t edge_order) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::gradient_scalararray::call(self, spacing, dim, edge_order);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> gradient_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t edge_order) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::gradient_array::call(self, dim, edge_order);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, edge_order);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> gradient_scalarrayint_generated_plumbing(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, c10::optional<int64_t> dim, int64_t edge_order) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::gradient_scalarrayint::call(self, spacing, dim, edge_order);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> gradient_scalarrayarray_generated_plumbing(const at::Tensor & self, at::ArrayRef<at::Scalar> spacing, at::IntArrayRef dim, int64_t edge_order) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::gradient_scalarrayarray::call(self, spacing, dim, edge_order);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> gradient_tensorarrayint_generated_plumbing(const at::Tensor & self, at::TensorList spacing, c10::optional<int64_t> dim, int64_t edge_order) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(spacing, cur_level)) {
    return at::_ops::gradient_tensorarrayint::call(self, spacing, dim, edge_order);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> gradient_tensorarray_generated_plumbing(const at::Tensor & self, at::TensorList spacing, at::IntArrayRef dim, int64_t edge_order) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(spacing, cur_level)) {
    return at::_ops::gradient_tensorarray::call(self, spacing, dim, edge_order);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, spacing, dim, edge_order);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor div_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::div_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & div__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::div__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor div_Tensor_mode_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::div_Tensor_mode::call(self, other, rounding_mode);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & div__Tensor_mode_generated_plumbing(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::div__Tensor_mode::call(self, other, rounding_mode);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor div_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::div_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & div__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::div__Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor div_Scalar_mode_generated_plumbing(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::div_Scalar_mode::call(self, other, rounding_mode);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other, rounding_mode);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & div__Scalar_mode_generated_plumbing(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::div__Scalar_mode::call(self, other, rounding_mode);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other, rounding_mode);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor divide_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::divide_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::divide__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::divide_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::divide__Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor divide_Tensor_mode_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::divide_Tensor_mode::call(self, other, rounding_mode);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & divide__Tensor_mode_generated_plumbing(at::Tensor & self, const at::Tensor & other, c10::optional<c10::string_view> rounding_mode) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::divide__Tensor_mode::call(self, other, rounding_mode);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim, rounding_mode);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor divide_Scalar_mode_generated_plumbing(const at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::divide_Scalar_mode::call(self, other, rounding_mode);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other, rounding_mode);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & divide__Scalar_mode_generated_plumbing(at::Tensor & self, const at::Scalar & other, c10::optional<c10::string_view> rounding_mode) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::divide__Scalar_mode::call(self, other, rounding_mode);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other, rounding_mode);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor true_divide_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::true_divide_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & true_divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::true_divide__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor true_divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::true_divide_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & true_divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::true_divide__Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor dot_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor, cur_level)) {
    return at::_ops::dot::call(self, tensor);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor tensor_value;
  optional<int64_t> tensor_bdim;
  std::tie(tensor_value, tensor_bdim) = unwrapTensorAtLevel(tensor, cur_level);
  auto results = batch_rule(self_value, self_bdim, tensor_value, tensor_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor vdot_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::vdot::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor einsum_generated_plumbing(c10::string_view equation, at::TensorList tensors, at::OptionalIntArrayRef path) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::einsum::call(equation, tensors, path);
  }

  auto results = batch_rule(equation, tensors, path);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor embedding_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::embedding::call(weight, indices, padding_idx, scale_grad_by_freq, sparse);
  }
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, padding_idx, scale_grad_by_freq, sparse);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor embedding_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq, bool sparse) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::embedding_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq, sparse);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq, sparse);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor embedding_dense_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & indices, c10::SymInt num_weights, c10::SymInt padding_idx, bool scale_grad_by_freq) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::embedding_dense_backward::call(grad_output, indices, num_weights, padding_idx, scale_grad_by_freq);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & embedding_renorm__generated_plumbing(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::embedding_renorm_::call(self, indices, max_norm, norm_type);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  batch_rule(self_value, self_bdim, indices_value, indices_bdim, max_norm, norm_type);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor embedding_sparse_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, int64_t num_weights, int64_t padding_idx, bool scale_grad_by_freq) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::embedding_sparse_backward::call(grad, indices, num_weights, padding_idx, scale_grad_by_freq);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, num_weights, padding_idx, scale_grad_by_freq);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_forward_only_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::_embedding_bag_forward_only::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
  }
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor offsets_value;
  optional<int64_t> offsets_bdim;
  std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
  optional<Tensor> per_sample_weights_value;
  optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
      std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _rowwise_prune_generated_plumbing(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::_rowwise_prune::call(weight, mask, compressed_indices_dtype);
  }
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(weight_value, weight_bdim, mask_value, mask_bdim, compressed_indices_dtype);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor row_stack_generated_plumbing(at::TensorList tensors) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level)) {
    return at::_ops::row_stack::call(tensors);
  }

  auto results = batch_rule(tensors);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset);
  }
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor offsets_value;
  optional<int64_t> offsets_bdim;
  std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
  optional<Tensor> per_sample_weights_value;
  optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
      std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> embedding_bag_padding_idx_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, c10::optional<int64_t> padding_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::embedding_bag_padding_idx::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
  }
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor offsets_value;
  optional<int64_t> offsets_bdim;
  std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
  optional<Tensor> per_sample_weights_value;
  optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
      std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _embedding_bag_generated_plumbing(const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, bool include_last_offset, int64_t padding_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::_embedding_bag::call(weight, indices, offsets, scale_grad_by_freq, mode, sparse, per_sample_weights, include_last_offset, padding_idx);
  }
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor offsets_value;
  optional<int64_t> offsets_bdim;
  std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
  optional<Tensor> per_sample_weights_value;
  optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
      std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, include_last_offset, padding_idx);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _embedding_bag_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, bool sparse, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(maximum_indices, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::_embedding_bag_backward::call(grad, indices, offsets, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights, padding_idx);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor offsets_value;
  optional<int64_t> offsets_bdim;
  std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
  Tensor offset2bag_value;
  optional<int64_t> offset2bag_bdim;
  std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level);
  Tensor bag_size_value;
  optional<int64_t> bag_size_bdim;
  std::tie(bag_size_value, bag_size_bdim) = unwrapTensorAtLevel(bag_size, cur_level);
  Tensor maximum_indices_value;
  optional<int64_t> maximum_indices_bdim;
  std::tie(maximum_indices_value, maximum_indices_bdim) = unwrapTensorAtLevel(maximum_indices, cur_level);
  optional<Tensor> per_sample_weights_value;
  optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
      std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, maximum_indices_value, maximum_indices_bdim, num_weights, scale_grad_by_freq, mode, sparse, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _embedding_bag_sparse_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor offsets_value;
  optional<int64_t> offsets_bdim;
  std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
  Tensor offset2bag_value;
  optional<int64_t> offset2bag_bdim;
  std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level);
  Tensor bag_size_value;
  optional<int64_t> bag_size_bdim;
  std::tie(bag_size_value, bag_size_bdim) = unwrapTensorAtLevel(bag_size, cur_level);
  optional<Tensor> per_sample_weights_value;
  optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
      std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, num_weights, scale_grad_by_freq, mode, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _embedding_bag_dense_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offset2bag, const at::Tensor & bag_size, const at::Tensor & maximum_indices, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offset2bag, cur_level) && !isBatchedAtLevel(bag_size, cur_level) && !isBatchedAtLevel(maximum_indices, cur_level) && !isBatchedAtLevel(per_sample_weights, cur_level)) {
    return at::_ops::_embedding_bag_dense_backward::call(grad, indices, offset2bag, bag_size, maximum_indices, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor offset2bag_value;
  optional<int64_t> offset2bag_bdim;
  std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level);
  Tensor bag_size_value;
  optional<int64_t> bag_size_bdim;
  std::tie(bag_size_value, bag_size_bdim) = unwrapTensorAtLevel(bag_size, cur_level);
  Tensor maximum_indices_value;
  optional<int64_t> maximum_indices_bdim;
  std::tie(maximum_indices_value, maximum_indices_bdim) = unwrapTensorAtLevel(maximum_indices, cur_level);
  optional<Tensor> per_sample_weights_value;
  optional<int64_t> per_sample_weights_bdim;
  if (per_sample_weights) {
      std::tie(per_sample_weights_value, per_sample_weights_bdim) = unwrapTensorAtLevel(per_sample_weights.value(), cur_level);
  }
  auto results = batch_rule(grad_value, grad_bdim, indices_value, indices_bdim, offset2bag_value, offset2bag_bdim, bag_size_value, bag_size_bdim, maximum_indices_value, maximum_indices_bdim, num_weights, scale_grad_by_freq, mode, per_sample_weights_value, per_sample_weights_bdim, padding_idx);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _embedding_bag_per_sample_weights_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(offset2bag, cur_level)) {
    return at::_ops::_embedding_bag_per_sample_weights_backward::call(grad, weight, indices, offsets, offset2bag, mode, padding_idx);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  Tensor offsets_value;
  optional<int64_t> offsets_bdim;
  std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
  Tensor offset2bag_value;
  optional<int64_t> offset2bag_bdim;
  std::tie(offset2bag_value, offset2bag_bdim) = unwrapTensorAtLevel(offset2bag, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, weight_value, weight_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, offset2bag_value, offset2bag_bdim, mode, padding_idx);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor new_empty_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::new_empty::call(self, size, dtype, layout, device, pin_memory);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor new_empty_strided_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::new_empty_strided::call(self, size, stride, dtype, layout, device, pin_memory);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, stride, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor new_full_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::new_full::call(self, size, fill_value, dtype, layout, device, pin_memory);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, fill_value, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor new_zeros_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::new_zeros::call(self, size, dtype, layout, device, pin_memory);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor new_ones_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::new_ones::call(self, size, dtype, layout, device, pin_memory);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _empty_per_channel_affine_quantized_generated_plumbing(c10::SymIntArrayRef size, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
    return at::_ops::_empty_per_channel_affine_quantized::call(size, scales, zero_points, axis, dtype, layout, device, pin_memory, memory_format);
  }
  Tensor scales_value;
  optional<int64_t> scales_bdim;
  std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
  Tensor zero_points_value;
  optional<int64_t> zero_points_bdim;
  std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level);
  auto results = batch_rule(size, scales_value, scales_bdim, zero_points_value, zero_points_bdim, axis, dtype, layout, device, pin_memory, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
const at::Tensor & _resize_output__generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_resize_output_::call(self, size, device);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, size, device);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor empty_quantized_generated_plumbing(at::IntArrayRef size, const at::Tensor & qtensor, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(qtensor, cur_level)) {
    return at::_ops::empty_quantized::call(size, qtensor, dtype, layout, device, pin_memory, memory_format);
  }
  Tensor qtensor_value;
  optional<int64_t> qtensor_bdim;
  std::tie(qtensor_value, qtensor_bdim) = unwrapTensorAtLevel(qtensor, cur_level);
  auto results = batch_rule(size, qtensor_value, qtensor_bdim, dtype, layout, device, pin_memory, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor empty_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::empty_like::call(self, dtype, layout, device, pin_memory, memory_format);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor erf_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::erf::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & erf__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::erf_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor erfc_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::erfc::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & erfc__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::erfc_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor exp_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::exp::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & exp__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::exp_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor exp2_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::exp2::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & exp2__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::exp2_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor expm1_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::expm1::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & expm1__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::expm1_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor expand_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::expand::call(self, size, implicit);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, size, implicit);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor expand_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::expand_as::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor flatten_using_ints_generated_plumbing(const at::Tensor & self, int64_t start_dim, int64_t end_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::flatten_using_ints::call(self, start_dim, end_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, start_dim, end_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor flatten_named_out_dim_generated_plumbing(const at::Tensor & self, int64_t start_dim, int64_t end_dim, at::Dimname out_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::flatten_named_out_dim::call(self, start_dim, end_dim, out_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, start_dim, end_dim, out_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor flatten_using_names_generated_plumbing(const at::Tensor & self, at::Dimname start_dim, at::Dimname end_dim, at::Dimname out_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::flatten_using_names::call(self, start_dim, end_dim, out_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, start_dim, end_dim, out_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor flatten_DimnameList_generated_plumbing(const at::Tensor & self, at::DimnameList dims, at::Dimname out_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::flatten_DimnameList::call(self, dims, out_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dims, out_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor unflatten_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymIntArrayRef sizes) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::unflatten_int::call(self, dim, sizes);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, sizes);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor unflatten_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::SymIntArrayRef sizes, at::DimnameList names) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::unflatten_Dimname::call(self, dim, sizes, names);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, sizes, names);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fill_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fill_Scalar::call(self, value);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, value);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fill_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(value, cur_level)) {
|
|
return at::_ops::fill_Tensor::call(self, value);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor value_value;
|
|
optional<int64_t> value_bdim;
|
|
std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, value_value, value_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & fill__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fill__Scalar::call(self, value);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, value);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & fill__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(value, cur_level)) {
|
|
return at::_ops::fill__Tensor::call(self, value);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor value_value;
|
|
optional<int64_t> value_bdim;
|
|
std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
|
|
batch_rule(self_value, self_bdim, value_value, value_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor floor_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::floor::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & floor__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::floor_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor floor_divide_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::floor_divide::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & floor_divide__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::floor_divide__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor floor_divide_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::floor_divide_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & floor_divide__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::floor_divide__Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor frac_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::frac::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & frac__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::frac_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor full_like_generated_plumbing(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::full_like::call(self, fill_value, dtype, layout, device, pin_memory, memory_format);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, fill_value, dtype, layout, device, pin_memory, memory_format);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor gcd_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::gcd::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & gcd__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::gcd_::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor lcm_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::lcm::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & lcm__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::lcm_::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor grid_sampler_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
|
|
return at::_ops::grid_sampler::call(input, grid, interpolation_mode, padding_mode, align_corners);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor grid_value;
|
|
optional<int64_t> grid_bdim;
|
|
std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor grid_sampler_2d_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
|
|
return at::_ops::grid_sampler_2d::call(input, grid, interpolation_mode, padding_mode, align_corners);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor grid_value;
|
|
optional<int64_t> grid_bdim;
|
|
std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> grid_sampler_2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
|
|
return at::_ops::grid_sampler_2d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor grid_value;
|
|
optional<int64_t> grid_bdim;
|
|
std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners, output_mask);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _grid_sampler_2d_cpu_fallback_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
|
|
return at::_ops::_grid_sampler_2d_cpu_fallback::call(input, grid, interpolation_mode, padding_mode, align_corners);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor grid_value;
|
|
optional<int64_t> grid_bdim;
|
|
std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
|
|
return at::_ops::_grid_sampler_2d_cpu_fallback_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor grid_value;
|
|
optional<int64_t> grid_bdim;
|
|
std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor grid_sampler_3d_generated_plumbing(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
|
|
return at::_ops::grid_sampler_3d::call(input, grid, interpolation_mode, padding_mode, align_corners);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor grid_value;
|
|
optional<int64_t> grid_bdim;
|
|
std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> grid_sampler_3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, ::std::array<bool,2> output_mask) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grid, cur_level)) {
|
|
return at::_ops::grid_sampler_3d_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners, output_mask);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor grid_value;
|
|
optional<int64_t> grid_bdim;
|
|
std::tie(grid_value, grid_bdim) = unwrapTensorAtLevel(grid, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, grid_value, grid_bdim, interpolation_mode, padding_mode, align_corners, output_mask);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor hinge_embedding_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, double margin, int64_t reduction) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
|
|
return at::_ops::hinge_embedding_loss::call(self, target, margin, reduction);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, margin, reduction);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor group_norm_generated_plumbing(const at::Tensor & input, int64_t num_groups, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enabled) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::group_norm::call(input, num_groups, weight, bias, eps, cudnn_enabled);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_value, input_bdim, num_groups, weight_value, weight_bdim, bias_value, bias_bdim, eps, cudnn_enabled);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, double eps) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::native_group_norm::call(input, weight, bias, N, C, HxW, group, eps);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, N, C, HxW, group, eps);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_group_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, c10::SymInt N, c10::SymInt C, c10::SymInt HxW, int64_t group, ::std::array<bool,3> output_mask) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(rstd, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::native_group_norm_backward::call(grad_out, input, mean, rstd, weight, N, C, HxW, group, output_mask);
|
|
}
|
|
Tensor grad_out_value;
|
|
optional<int64_t> grad_out_bdim;
|
|
std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor mean_value;
|
|
optional<int64_t> mean_bdim;
|
|
std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
|
|
Tensor rstd_value;
|
|
optional<int64_t> rstd_bdim;
|
|
std::tie(rstd_value, rstd_bdim) = unwrapTensorAtLevel(rstd, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, rstd_value, rstd_bdim, weight_value, weight_bdim, N, C, HxW, group, output_mask);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _fft_r2c_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_fft_r2c::call(self, dim, normalization, onesided);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, normalization, onesided);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _fft_c2r_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, c10::SymInt last_dim_size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_fft_c2r::call(self, dim, normalization, last_dim_size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, normalization, last_dim_size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _fft_c2c_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef dim, int64_t normalization, bool forward) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_fft_c2c::call(self, dim, normalization, forward);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, normalization, forward);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _validate_compressed_sparse_indices_generated_plumbing(bool is_crow, const at::Tensor & compressed_idx, const at::Tensor & plain_idx, int64_t cdim, int64_t dim, int64_t nnz) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(compressed_idx, cur_level) && !isBatchedAtLevel(plain_idx, cur_level)) {
|
|
return at::_ops::_validate_compressed_sparse_indices::call(is_crow, compressed_idx, plain_idx, cdim, dim, nnz);
|
|
}
|
|
Tensor compressed_idx_value;
|
|
optional<int64_t> compressed_idx_bdim;
|
|
std::tie(compressed_idx_value, compressed_idx_bdim) = unwrapTensorAtLevel(compressed_idx, cur_level);
|
|
Tensor plain_idx_value;
|
|
optional<int64_t> plain_idx_bdim;
|
|
std::tie(plain_idx_value, plain_idx_bdim) = unwrapTensorAtLevel(plain_idx, cur_level);
|
|
batch_rule(is_crow, compressed_idx_value, compressed_idx_bdim, plain_idx_value, plain_idx_bdim, cdim, dim, nnz);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor index_Tensor_generated_plumbing(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
|
|
return at::_ops::index_Tensor::call(self, indices);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, indices);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _unsafe_index_Tensor_generated_plumbing(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
|
|
return at::_ops::_unsafe_index_Tensor::call(self, indices);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, indices);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & index_copy__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
|
|
return at::_ops::index_copy_::call(self, dim, index, source);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor index_value;
|
|
optional<int64_t> index_bdim;
|
|
std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
|
|
Tensor source_value;
|
|
optional<int64_t> source_bdim;
|
|
std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
|
|
batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor index_copy_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
|
|
return at::_ops::index_copy::call(self, dim, index, source);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor index_value;
|
|
optional<int64_t> index_bdim;
|
|
std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
|
|
Tensor source_value;
|
|
optional<int64_t> source_bdim;
|
|
std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & index_copy__dimname_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
|
|
return at::_ops::index_copy__dimname::call(self, dim, index, source);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor index_value;
|
|
optional<int64_t> index_bdim;
|
|
std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
|
|
Tensor source_value;
|
|
optional<int64_t> source_bdim;
|
|
std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
|
|
batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor index_copy_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
|
|
return at::_ops::index_copy_dimname::call(self, dim, index, source);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor index_value;
|
|
optional<int64_t> index_bdim;
|
|
std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
|
|
Tensor source_value;
|
|
optional<int64_t> source_bdim;
|
|
std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & index_put__generated_plumbing(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::index_put_::call(self, indices, values, accumulate);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor index_put_generated_plumbing(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::index_put::call(self, indices, values, accumulate);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _unsafe_index_put_generated_plumbing(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_unsafe_index_put::call(self, indices, values, accumulate);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & _index_put_impl__generated_plumbing(at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_index_put_impl_::call(self, indices, values, accumulate, unsafe);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate, unsafe);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor instance_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool use_input_stats, double momentum, double eps, bool cudnn_enabled) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
    return at::_ops::instance_norm::call(input, weight, bias, running_mean, running_var, use_input_stats, momentum, eps, cudnn_enabled);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  optional<Tensor> running_mean_value;
  optional<int64_t> running_mean_bdim;
  if (running_mean) {
    std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  optional<Tensor> running_var_value;
  optional<int64_t> running_var_bdim;
  if (running_var) {
    std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, use_input_stats, momentum, eps, cudnn_enabled);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor isclose_generated_plumbing(const at::Tensor & self, const at::Tensor & other, double rtol, double atol, bool equal_nan) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::isclose::call(self, other, rtol, atol, equal_nan);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, rtol, atol, equal_nan);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor isin_Tensor_Tensor_generated_plumbing(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(elements, cur_level) && !isBatchedAtLevel(test_elements, cur_level)) {
    return at::_ops::isin_Tensor_Tensor::call(elements, test_elements, assume_unique, invert);
  }
  Tensor elements_value;
  optional<int64_t> elements_bdim;
  std::tie(elements_value, elements_bdim) = unwrapTensorAtLevel(elements, cur_level);
  Tensor test_elements_value;
  optional<int64_t> test_elements_bdim;
  std::tie(test_elements_value, test_elements_bdim) = unwrapTensorAtLevel(test_elements, cur_level);
  auto results = batch_rule(elements_value, elements_bdim, test_elements_value, test_elements_bdim, assume_unique, invert);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor isin_Tensor_Scalar_generated_plumbing(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(elements, cur_level)) {
    return at::_ops::isin_Tensor_Scalar::call(elements, test_element, assume_unique, invert);
  }
  Tensor elements_value;
  optional<int64_t> elements_bdim;
  std::tie(elements_value, elements_bdim) = unwrapTensorAtLevel(elements, cur_level);
  auto results = batch_rule(elements_value, elements_bdim, test_element, assume_unique, invert);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor isin_Scalar_Tensor_generated_plumbing(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(test_elements, cur_level)) {
    return at::_ops::isin_Scalar_Tensor::call(element, test_elements, assume_unique, invert);
  }
  Tensor test_elements_value;
  optional<int64_t> test_elements_bdim;
  std::tie(test_elements_value, test_elements_bdim) = unwrapTensorAtLevel(test_elements, cur_level);
  auto results = batch_rule(element, test_elements_value, test_elements_bdim, assume_unique, invert);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor isnan_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::isnan::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
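// A batch rule plugged into a unary plumbing wrapper such as isnan_generated_plumbing is
// expected to accept the unwrapped value plus its optional batch dimension and to return
// the result tensor together with its batch dimension, matching the std::get<0>/<1> use
// above. A minimal sketch of the assumed shape (the name `isnan_batch_rule` is
// illustrative only, not an existing symbol):
//
//   std::tuple<Tensor, optional<int64_t>> isnan_batch_rule(
//       const Tensor& self, optional<int64_t> self_bdim) {
//     // isnan is elementwise, so the batch dimension passes through unchanged
//     return std::make_tuple(at::isnan(self), self_bdim);
//   }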
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor isreal_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::isreal::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor kl_div_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, bool log_target) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
    return at::_ops::kl_div::call(self, target, reduction, log_target);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor target_value;
  optional<int64_t> target_bdim;
  std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
  auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, log_target);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor kron_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::kron::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> kthvalue_generated_plumbing(const at::Tensor & self, int64_t k, int64_t dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::kthvalue::call(self, k, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, k, dim, keepdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> kthvalue_dimname_generated_plumbing(const at::Tensor & self, int64_t k, at::Dimname dim, bool keepdim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::kthvalue_dimname::call(self, k, dim, keepdim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, k, dim, keepdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor layer_norm_generated_plumbing(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps, bool cudnn_enable) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::layer_norm::call(input, normalized_shape, weight, bias, eps, cudnn_enable);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, normalized_shape, weight_value, weight_bdim, bias_value, bias_bdim, eps, cudnn_enable);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_generated_plumbing(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, double eps) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::native_layer_norm::call(input, normalized_shape, weight, bias, eps);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, normalized_shape, weight_value, weight_bdim, bias_value, bias_bdim, eps);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_layer_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const at::Tensor & mean, const at::Tensor & rstd, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, ::std::array<bool,3> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(rstd, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::native_layer_norm_backward::call(grad_out, input, normalized_shape, mean, rstd, weight, bias, output_mask);
  }
  Tensor grad_out_value;
  optional<int64_t> grad_out_bdim;
  std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor mean_value;
  optional<int64_t> mean_bdim;
  std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
  Tensor rstd_value;
  optional<int64_t> rstd_bdim;
  std::tie(rstd_value, rstd_bdim) = unwrapTensorAtLevel(rstd, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, normalized_shape, mean_value, mean_bdim, rstd_value, rstd_bdim, weight_value, weight_bdim, bias_value, bias_bdim, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
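// Plumbing wrappers with multiple outputs, such as native_layer_norm_backward above,
// expect the batch rule to return a flat tuple of (value, bdim) pairs; each pair is
// re-wrapped independently with makeBatched before being packed into the final tuple.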
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor nan_to_num_generated_plumbing(const at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::nan_to_num::call(self, nan, posinf, neginf);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, nan, posinf, neginf);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & nan_to_num__generated_plumbing(at::Tensor & self, c10::optional<double> nan, c10::optional<double> posinf, c10::optional<double> neginf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::nan_to_num_::call(self, nan, posinf, neginf);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, nan, posinf, neginf);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linear_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::linear::call(input, weight, bias);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linear_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::linear_backward::call(self, grad_output, weight, output_mask);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mkldnn_linear_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::mkldnn_linear::call(self, weight, bias);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mkldnn_linear_backward_input_generated_plumbing(at::IntArrayRef input_size, const at::Tensor & grad_output, const at::Tensor & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::mkldnn_linear_backward_input::call(input_size, grad_output, weight);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(input_size, grad_output_value, grad_output_bdim, weight_value, weight_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> mkldnn_linear_backward_weights_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & weight, bool bias_defined) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::mkldnn_linear_backward_weights::call(grad_output, input, weight, bias_defined);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, input_value, input_bdim, weight_value, weight_bdim, bias_defined);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> mkldnn_linear_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, ::std::array<bool,3> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::mkldnn_linear_backward::call(self, grad_output, weight, output_mask);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cslt_compress_generated_plumbing(const at::Tensor & input) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::_cslt_compress::call(input);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _cslt_sparse_mm_generated_plumbing(const at::Tensor & compressed_A, const at::Tensor & dense_B, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & alpha, c10::optional<at::ScalarType> out_dtype, bool transpose_result, int64_t alg_id) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(compressed_A, cur_level) && !isBatchedAtLevel(dense_B, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(alpha, cur_level)) {
    return at::_ops::_cslt_sparse_mm::call(compressed_A, dense_B, bias, alpha, out_dtype, transpose_result, alg_id);
  }
  Tensor compressed_A_value;
  optional<int64_t> compressed_A_bdim;
  std::tie(compressed_A_value, compressed_A_bdim) = unwrapTensorAtLevel(compressed_A, cur_level);
  Tensor dense_B_value;
  optional<int64_t> dense_B_bdim;
  std::tie(dense_B_value, dense_B_bdim) = unwrapTensorAtLevel(dense_B, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  optional<Tensor> alpha_value;
  optional<int64_t> alpha_bdim;
  if (alpha) {
    std::tie(alpha_value, alpha_bdim) = unwrapTensorAtLevel(alpha.value(), cur_level);
  }
  auto results = batch_rule(compressed_A_value, compressed_A_bdim, dense_B_value, dense_B_bdim, bias_value, bias_bdim, alpha_value, alpha_bdim, out_dtype, transpose_result, alg_id);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _sparse_semi_structured_linear_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & meta, const c10::optional<at::Tensor> & bias, c10::optional<c10::string_view> activation, c10::optional<at::ScalarType> out_dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(meta, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::_sparse_semi_structured_linear::call(input, weight, meta, bias, activation, out_dtype);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor meta_value;
  optional<int64_t> meta_bdim;
  std::tie(meta_value, meta_bdim) = unwrapTensorAtLevel(meta, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, meta_value, meta_bdim, bias_value, bias_bdim, activation, out_dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _mixed_dtypes_linear_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & scale, const c10::optional<at::Tensor> & bias, c10::optional<c10::string_view> activation) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::_mixed_dtypes_linear::call(input, weight, scale, bias, activation);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor scale_value;
  optional<int64_t> scale_bdim;
  std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, scale_value, scale_bdim, bias_value, bias_bdim, activation);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fbgemm_linear_int8_weight_fp32_activation_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(packed, cur_level) && !isBatchedAtLevel(col_offsets, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::fbgemm_linear_int8_weight_fp32_activation::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor packed_value;
  optional<int64_t> packed_bdim;
  std::tie(packed_value, packed_bdim) = unwrapTensorAtLevel(packed, cur_level);
  Tensor col_offsets_value;
  optional<int64_t> col_offsets_bdim;
  std::tie(col_offsets_value, col_offsets_bdim) = unwrapTensorAtLevel(col_offsets, cur_level);
  Tensor bias_value;
  optional<int64_t> bias_bdim;
  std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, packed_value, packed_bdim, col_offsets_value, col_offsets_bdim, weight_scale, weight_zero_point, bias_value, bias_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fbgemm_linear_int8_weight_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(packed, cur_level) && !isBatchedAtLevel(col_offsets, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::fbgemm_linear_int8_weight::call(input, weight, packed, col_offsets, weight_scale, weight_zero_point, bias);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  Tensor packed_value;
  optional<int64_t> packed_bdim;
  std::tie(packed_value, packed_bdim) = unwrapTensorAtLevel(packed, cur_level);
  Tensor col_offsets_value;
  optional<int64_t> col_offsets_bdim;
  std::tie(col_offsets_value, col_offsets_bdim) = unwrapTensorAtLevel(col_offsets, cur_level);
  Tensor bias_value;
  optional<int64_t> bias_bdim;
  std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, packed_value, packed_bdim, col_offsets_value, col_offsets_bdim, weight_scale, weight_zero_point, bias_value, bias_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fbgemm_pack_gemm_matrix_fp16_generated_plumbing(const at::Tensor & input) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::fbgemm_pack_gemm_matrix_fp16::call(input);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fbgemm_linear_fp16_weight_fp32_activation_generated_plumbing(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(packed_weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::fbgemm_linear_fp16_weight_fp32_activation::call(input, packed_weight, bias);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor packed_weight_value;
  optional<int64_t> packed_weight_bdim;
  std::tie(packed_weight_value, packed_weight_bdim) = unwrapTensorAtLevel(packed_weight, cur_level);
  Tensor bias_value;
  optional<int64_t> bias_bdim;
  std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
  auto results = batch_rule(input_value, input_bdim, packed_weight_value, packed_weight_bdim, bias_value, bias_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fbgemm_linear_fp16_weight_generated_plumbing(const at::Tensor & input, const at::Tensor & packed_weight, const at::Tensor & bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(packed_weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
    return at::_ops::fbgemm_linear_fp16_weight::call(input, packed_weight, bias);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor packed_weight_value;
  optional<int64_t> packed_weight_bdim;
  std::tie(packed_weight_value, packed_weight_bdim) = unwrapTensorAtLevel(packed_weight, cur_level);
  Tensor bias_value;
  optional<int64_t> bias_bdim;
  std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias, cur_level);
  auto results = batch_rule(input_value, input_bdim, packed_weight_value, packed_weight_bdim, bias_value, bias_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fbgemm_pack_quantized_matrix_generated_plumbing(const at::Tensor & input) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::fbgemm_pack_quantized_matrix::call(input);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor fbgemm_pack_quantized_matrix_KN_generated_plumbing(const at::Tensor & input, int64_t K, int64_t N) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level)) {
    return at::_ops::fbgemm_pack_quantized_matrix_KN::call(input, K, N);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, K, N);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor ldexp_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::ldexp_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & ldexp__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::ldexp_::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
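// In-place plumbing wrappers (generated with "gen_vmap_inplace_plumbing", e.g. ldexp_
// above) do not re-wrap anything: the batch rule is invoked only for its side effect on
// the unwrapped self_value, and the original `self` reference is returned unchanged.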
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linspace_Tensor_Tensor_generated_plumbing(const at::Tensor & start, const at::Tensor & end, int64_t steps, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(start, cur_level) && !isBatchedAtLevel(end, cur_level)) {
    return at::_ops::linspace_Tensor_Tensor::call(start, end, steps, dtype, layout, device, pin_memory);
  }
  Tensor start_value;
  optional<int64_t> start_bdim;
  std::tie(start_value, start_bdim) = unwrapTensorAtLevel(start, cur_level);
  Tensor end_value;
  optional<int64_t> end_bdim;
  std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
  auto results = batch_rule(start_value, start_bdim, end_value, end_bdim, steps, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linspace_Tensor_Scalar_generated_plumbing(const at::Tensor & start, const at::Scalar & end, int64_t steps, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(start, cur_level)) {
    return at::_ops::linspace_Tensor_Scalar::call(start, end, steps, dtype, layout, device, pin_memory);
  }
  Tensor start_value;
  optional<int64_t> start_bdim;
  std::tie(start_value, start_bdim) = unwrapTensorAtLevel(start, cur_level);
  auto results = batch_rule(start_value, start_bdim, end, steps, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor linspace_Scalar_Tensor_generated_plumbing(const at::Scalar & start, const at::Tensor & end, int64_t steps, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(end, cur_level)) {
    return at::_ops::linspace_Scalar_Tensor::call(start, end, steps, dtype, layout, device, pin_memory);
  }
  Tensor end_value;
  optional<int64_t> end_bdim;
  std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
  auto results = batch_rule(start, end_value, end_bdim, steps, dtype, layout, device, pin_memory);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor log_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & log__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor log10_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log10::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & log10__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log10_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor log1p_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log1p::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & log1p__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log1p_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor log2_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log2::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & log2__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log2_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logaddexp_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::logaddexp::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logaddexp2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::logaddexp2::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor xlogy_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::xlogy_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor xlogy_Scalar_Self_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::xlogy_Scalar_Self::call(self, other);
  }
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor xlogy_Scalar_Other_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::xlogy_Scalar_Other::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & xlogy__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::xlogy__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & xlogy__Scalar_Other_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::xlogy__Scalar_Other::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logspace_Tensor_Tensor_generated_plumbing(const at::Tensor & start, const at::Tensor & end, int64_t steps, double base, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(start, cur_level) && !isBatchedAtLevel(end, cur_level)) {
return at::_ops::logspace_Tensor_Tensor::call(start, end, steps, base, dtype, layout, device, pin_memory);
}
Tensor start_value;
optional<int64_t> start_bdim;
std::tie(start_value, start_bdim) = unwrapTensorAtLevel(start, cur_level);
Tensor end_value;
optional<int64_t> end_bdim;
std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
auto results = batch_rule(start_value, start_bdim, end_value, end_bdim, steps, base, dtype, layout, device, pin_memory);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logspace_Tensor_Scalar_generated_plumbing(const at::Tensor & start, const at::Scalar & end, int64_t steps, double base, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(start, cur_level)) {
return at::_ops::logspace_Tensor_Scalar::call(start, end, steps, base, dtype, layout, device, pin_memory);
}
Tensor start_value;
optional<int64_t> start_bdim;
std::tie(start_value, start_bdim) = unwrapTensorAtLevel(start, cur_level);
auto results = batch_rule(start_value, start_bdim, end, steps, base, dtype, layout, device, pin_memory);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logspace_Scalar_Tensor_generated_plumbing(const at::Scalar & start, const at::Tensor & end, int64_t steps, double base, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(end, cur_level)) {
return at::_ops::logspace_Scalar_Tensor::call(start, end, steps, base, dtype, layout, device, pin_memory);
}
Tensor end_value;
optional<int64_t> end_bdim;
std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
auto results = batch_rule(start, end_value, end_bdim, steps, base, dtype, layout, device, pin_memory);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor log_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::log_softmax_int::call(self, dim, dtype);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, dim, dtype);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor log_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::log_softmax_Dimname::call(self, dim, dtype);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, dim, dtype);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::_log_softmax::call(self, dim, half_to_float);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _log_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
return at::_ops::_log_softmax_backward_data::call(grad_output, output, dim, input_dtype);
}
Tensor grad_output_value;
optional<int64_t> grad_output_bdim;
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
Tensor output_value;
optional<int64_t> output_bdim;
std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, input_dtype);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _logcumsumexp_generated_plumbing(const at::Tensor & self, int64_t dim) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::_logcumsumexp::call(self, dim);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, dim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logcumsumexp_generated_plumbing(const at::Tensor & self, int64_t dim) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::logcumsumexp::call(self, dim);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, dim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logcumsumexp_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::logcumsumexp_dimname::call(self, dim);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, dim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logsumexp_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::logsumexp::call(self, dim, keepdim);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logsumexp_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::logsumexp_names::call(self, dim, keepdim);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor margin_ranking_loss_generated_plumbing(const at::Tensor & input1, const at::Tensor & input2, const at::Tensor & target, double margin, int64_t reduction) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(input1, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(target, cur_level)) {
return at::_ops::margin_ranking_loss::call(input1, input2, target, margin, reduction);
}
Tensor input1_value;
optional<int64_t> input1_bdim;
std::tie(input1_value, input1_bdim) = unwrapTensorAtLevel(input1, cur_level);
Tensor input2_value;
optional<int64_t> input2_bdim;
std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
Tensor target_value;
optional<int64_t> target_bdim;
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
auto results = batch_rule(input1_value, input1_bdim, input2_value, input2_bdim, target_value, target_bdim, margin, reduction);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
return at::_ops::matmul::call(self, other);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor other_value;
optional<int64_t> other_bdim;
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
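// Multi-output ops return their (value, bdim) pairs interleaved in `results`
// (out0, bdim0, out1, bdim1, ...); each output is re-wrapped with makeBatched
// before being packed into the returned tuple.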
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> matmul_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & other, ::std::array<bool,2> mask) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
return at::_ops::matmul_backward::call(grad, self, other, mask);
}
Tensor grad_value;
optional<int64_t> grad_bdim;
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor other_value;
optional<int64_t> other_bdim;
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, other_value, other_bdim, mask);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor matrix_power_generated_plumbing(const at::Tensor & self, int64_t n) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::matrix_power::call(self, n);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, n);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor matrix_exp_generated_plumbing(const at::Tensor & self) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::matrix_exp::call(self);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor matrix_exp_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad, cur_level)) {
return at::_ops::matrix_exp_backward::call(self, grad);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor grad_value;
optional<int64_t> grad_bdim;
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
auto results = batch_rule(self_value, self_bdim, grad_value, grad_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _aminmax_generated_plumbing(const at::Tensor & self) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::_aminmax::call(self);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _aminmax_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::_aminmax_dim::call(self, dim, keepdim);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> aminmax_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dim, bool keepdim) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::aminmax::call(self, dim, keepdim);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _compute_linear_combination_generated_plumbing(const at::Tensor & input, const at::Tensor & coefficients) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(coefficients, cur_level)) {
return at::_ops::_compute_linear_combination::call(input, coefficients);
}
Tensor input_value;
optional<int64_t> input_bdim;
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
Tensor coefficients_value;
optional<int64_t> coefficients_bdim;
std::tie(coefficients_value, coefficients_bdim) = unwrapTensorAtLevel(coefficients, cur_level);
auto results = batch_rule(input_value, input_bdim, coefficients_value, coefficients_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> max_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::max_dim::call(self, dim, keepdim);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> max_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::max_names_dim::call(self, dim, keepdim);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor value_selecting_reduction_backward_generated_plumbing(const at::Tensor & grad, int64_t dim, const at::Tensor & indices, c10::SymIntArrayRef sizes, bool keepdim) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
return at::_ops::value_selecting_reduction_backward::call(grad, dim, indices, sizes, keepdim);
}
Tensor grad_value;
optional<int64_t> grad_bdim;
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
Tensor indices_value;
optional<int64_t> indices_bdim;
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
auto results = batch_rule(grad_value, grad_bdim, dim, indices_value, indices_bdim, sizes, keepdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor amax_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::amax::call(self, dim, keepdim);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> max_pool1d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::max_pool1d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
return at::_ops::max_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode);
}
Tensor grad_output_value;
optional<int64_t> grad_output_bdim;
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mkldnn_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::mkldnn_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mkldnn_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(input, cur_level)) {
return at::_ops::mkldnn_max_pool2d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
}
Tensor grad_output_value;
optional<int64_t> grad_output_bdim;
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
Tensor output_value;
optional<int64_t> output_bdim;
std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
Tensor input_value;
optional<int64_t> input_bdim;
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, input_value, input_bdim, kernel_size, stride, padding, dilation, ceil_mode);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mkldnn_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::mkldnn_max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mkldnn_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(input, cur_level)) {
return at::_ops::mkldnn_max_pool3d_backward::call(grad_output, output, input, kernel_size, stride, padding, dilation, ceil_mode);
}
Tensor grad_output_value;
optional<int64_t> grad_output_bdim;
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
Tensor output_value;
optional<int64_t> output_bdim;
std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
Tensor input_value;
optional<int64_t> input_bdim;
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, input_value, input_bdim, kernel_size, stride, padding, dilation, ceil_mode);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantized_max_pool1d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::quantized_max_pool1d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantized_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::quantized_max_pool2d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantized_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::quantized_max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::max_pool3d::call(self, kernel_size, stride, padding, dilation, ceil_mode);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor mean_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::mean::call(self, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::mean_dim::call(self, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::mean_names_dim::call(self, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor nanmean_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::nanmean::call(self, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor median_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::median::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> median_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::median_dim::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> median_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::median_names_dim::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor nanmedian_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::nanmedian::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> nanmedian_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::nanmedian_dim::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> nanmedian_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::nanmedian_names_dim::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> min_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::min_dim::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> min_names_dim_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::min_names_dim::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor amin_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::amin::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _mps_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::_mps_convolution::call(self, weight, bias, padding, stride, dilation, groups);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> mps_convolution_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_output, const at::Tensor & weight, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, ::std::array<bool,3> output_mask) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::mps_convolution_backward::call(self, grad_output, weight, padding, stride, dilation, groups, output_mask);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, padding, stride, dilation, groups, output_mask);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor mkldnn_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::mkldnn_convolution::call(self, weight, bias, padding, stride, dilation, groups);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_generated_plumbing(const at::Tensor & input, const at::Tensor & weight0, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & hx_, const at::Tensor & cx_, bool reverse, at::IntArrayRef batch_sizes, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool bidirectional, bool batch_first, bool train) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight0, cur_level) && !isBatchedAtLevel(weight1, cur_level) && !isBatchedAtLevel(weight2, cur_level) && !isBatchedAtLevel(weight3, cur_level) && !isBatchedAtLevel(hx_, cur_level) && !isBatchedAtLevel(cx_, cur_level)) {
|
|
return at::_ops::mkldnn_rnn_layer::call(input, weight0, weight1, weight2, weight3, hx_, cx_, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor weight0_value;
|
|
optional<int64_t> weight0_bdim;
|
|
std::tie(weight0_value, weight0_bdim) = unwrapTensorAtLevel(weight0, cur_level);
|
|
Tensor weight1_value;
|
|
optional<int64_t> weight1_bdim;
|
|
std::tie(weight1_value, weight1_bdim) = unwrapTensorAtLevel(weight1, cur_level);
|
|
Tensor weight2_value;
|
|
optional<int64_t> weight2_bdim;
|
|
std::tie(weight2_value, weight2_bdim) = unwrapTensorAtLevel(weight2, cur_level);
|
|
Tensor weight3_value;
|
|
optional<int64_t> weight3_bdim;
|
|
std::tie(weight3_value, weight3_bdim) = unwrapTensorAtLevel(weight3, cur_level);
|
|
Tensor hx__value;
|
|
optional<int64_t> hx__bdim;
|
|
std::tie(hx__value, hx__bdim) = unwrapTensorAtLevel(hx_, cur_level);
|
|
Tensor cx__value;
|
|
optional<int64_t> cx__bdim;
|
|
std::tie(cx__value, cx__bdim) = unwrapTensorAtLevel(cx_, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, weight0_value, weight0_bdim, weight1_value, weight1_bdim, weight2_value, weight2_bdim, weight3_value, weight3_bdim, hx__value, hx__bdim, cx__value, cx__bdim, reverse, batch_sizes, mode, hidden_size, num_layers, has_biases, bidirectional, batch_first, train);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> mkldnn_rnn_layer_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & weight1, const at::Tensor & weight2, const at::Tensor & weight3, const at::Tensor & weight4, const at::Tensor & hx_, const at::Tensor & cx_tmp, const at::Tensor & output, const at::Tensor & hy_, const at::Tensor & cy_, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, bool reverse, int64_t mode, int64_t hidden_size, int64_t num_layers, bool has_biases, bool train, bool bidirectional, at::IntArrayRef batch_sizes, bool batch_first, const at::Tensor & workspace) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight1, cur_level) && !isBatchedAtLevel(weight2, cur_level) && !isBatchedAtLevel(weight3, cur_level) && !isBatchedAtLevel(weight4, cur_level) && !isBatchedAtLevel(hx_, cur_level) && !isBatchedAtLevel(cx_tmp, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(hy_, cur_level) && !isBatchedAtLevel(cy_, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
|
|
return at::_ops::mkldnn_rnn_layer_backward::call(input, weight1, weight2, weight3, weight4, hx_, cx_tmp, output, hy_, cy_, grad_output, grad_hy, grad_cy, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor weight1_value;
|
|
optional<int64_t> weight1_bdim;
|
|
std::tie(weight1_value, weight1_bdim) = unwrapTensorAtLevel(weight1, cur_level);
|
|
Tensor weight2_value;
|
|
optional<int64_t> weight2_bdim;
|
|
std::tie(weight2_value, weight2_bdim) = unwrapTensorAtLevel(weight2, cur_level);
|
|
Tensor weight3_value;
|
|
optional<int64_t> weight3_bdim;
|
|
std::tie(weight3_value, weight3_bdim) = unwrapTensorAtLevel(weight3, cur_level);
|
|
Tensor weight4_value;
|
|
optional<int64_t> weight4_bdim;
|
|
std::tie(weight4_value, weight4_bdim) = unwrapTensorAtLevel(weight4, cur_level);
|
|
Tensor hx__value;
|
|
optional<int64_t> hx__bdim;
|
|
std::tie(hx__value, hx__bdim) = unwrapTensorAtLevel(hx_, cur_level);
|
|
Tensor cx_tmp_value;
|
|
optional<int64_t> cx_tmp_bdim;
|
|
std::tie(cx_tmp_value, cx_tmp_bdim) = unwrapTensorAtLevel(cx_tmp, cur_level);
|
|
Tensor output_value;
|
|
optional<int64_t> output_bdim;
|
|
std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
|
|
Tensor hy__value;
|
|
optional<int64_t> hy__bdim;
|
|
std::tie(hy__value, hy__bdim) = unwrapTensorAtLevel(hy_, cur_level);
|
|
Tensor cy__value;
|
|
optional<int64_t> cy__bdim;
|
|
std::tie(cy__value, cy__bdim) = unwrapTensorAtLevel(cy_, cur_level);
|
|
Tensor workspace_value;
|
|
optional<int64_t> workspace_bdim;
|
|
std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level);
|
|
optional<Tensor> grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
if (grad_output) {
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
|
|
}
|
|
optional<Tensor> grad_hy_value;
|
|
optional<int64_t> grad_hy_bdim;
|
|
if (grad_hy) {
|
|
std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
|
|
}
|
|
optional<Tensor> grad_cy_value;
|
|
optional<int64_t> grad_cy_bdim;
|
|
if (grad_cy) {
|
|
std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_value, input_bdim, weight1_value, weight1_bdim, weight2_value, weight2_bdim, weight3_value, weight3_bdim, weight4_value, weight4_bdim, hx__value, hx__bdim, cx_tmp_value, cx_tmp_bdim, output_value, output_bdim, hy__value, hy__bdim, cy__value, cy__bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, reverse, mode, hidden_size, num_layers, has_biases, train, bidirectional, batch_sizes, batch_first, workspace_value, workspace_bdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level), makeBatched(std::get<12>(results), std::get<13>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
|
|
return at::_ops::miopen_batch_norm::call(input, weight, bias, running_mean, running_var, training, exponential_average_factor, epsilon);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
optional<Tensor> running_mean_value;
|
|
optional<int64_t> running_mean_bdim;
|
|
if (running_mean) {
|
|
std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
|
|
}
|
|
optional<Tensor> running_var_value;
|
|
optional<int64_t> running_var_bdim;
|
|
if (running_var) {
|
|
std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, exponential_average_factor, epsilon);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm_backward_generated_plumbing(const at::Tensor & input, const at::Tensor & grad_output, const at::Tensor & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_var, double epsilon) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_var, cur_level)) {
|
|
return at::_ops::miopen_batch_norm_backward::call(input, grad_output, weight, running_mean, running_var, save_mean, save_var, epsilon);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> running_mean_value;
|
|
optional<int64_t> running_mean_bdim;
|
|
if (running_mean) {
|
|
std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
|
|
}
|
|
optional<Tensor> running_var_value;
|
|
optional<int64_t> running_var_bdim;
|
|
if (running_var) {
|
|
std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
|
|
}
|
|
optional<Tensor> save_mean_value;
|
|
optional<int64_t> save_mean_bdim;
|
|
if (save_mean) {
|
|
std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
|
|
}
|
|
optional<Tensor> save_var_value;
|
|
optional<int64_t> save_var_bdim;
|
|
if (save_var) {
|
|
std::tie(save_var_value, save_var_bdim) = unwrapTensorAtLevel(save_var.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_value, input_bdim, grad_output_value, grad_output_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_var_value, save_var_bdim, epsilon);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor miopen_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::miopen_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups, benchmark, deterministic);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor miopen_convolution_transpose_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::miopen_convolution_transpose::call(self, weight, bias, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, output_padding, stride, dilation, groups, benchmark, deterministic);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor miopen_depthwise_convolution_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride, dilation, groups, benchmark, deterministic);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor miopen_convolution_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::miopen_convolution_relu::call(self, weight, bias, stride, padding, dilation, groups);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, bias_value, bias_bdim, stride, padding, dilation, groups);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor miopen_convolution_add_relu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, const at::Tensor & z, const c10::optional<at::Scalar> & alpha, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(z, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::miopen_convolution_add_relu::call(self, weight, z, alpha, bias, stride, padding, dilation, groups);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
Tensor z_value;
|
|
optional<int64_t> z_bdim;
|
|
std::tie(z_value, z_bdim) = unwrapTensorAtLevel(z, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, z_value, z_bdim, alpha, bias_value, bias_bdim, stride, padding, dilation, groups);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> miopen_rnn_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(dropout_state, cur_level)) {
|
|
return at::_ops::miopen_rnn::call(input, weight, weight_stride0, hx, cx, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor hx_value;
|
|
optional<int64_t> hx_bdim;
|
|
std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
|
|
optional<Tensor> cx_value;
|
|
optional<int64_t> cx_bdim;
|
|
if (cx) {
|
|
std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
|
|
}
|
|
optional<Tensor> dropout_state_value;
|
|
optional<int64_t> dropout_state_bdim;
|
|
if (dropout_state) {
|
|
std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, hx_value, hx_bdim, cx_value, cx_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor,::std::vector<at::Tensor>> miopen_rnn_backward_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level)) {
|
|
return at::_ops::miopen_rnn_backward::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor weight_buf_value;
|
|
optional<int64_t> weight_buf_bdim;
|
|
std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level);
|
|
Tensor hx_value;
|
|
optional<int64_t> hx_bdim;
|
|
std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
|
|
Tensor output_value;
|
|
optional<int64_t> output_bdim;
|
|
std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
|
|
Tensor reserve_value;
|
|
optional<int64_t> reserve_bdim;
|
|
std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level);
|
|
optional<Tensor> cx_value;
|
|
optional<int64_t> cx_bdim;
|
|
if (cx) {
|
|
std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
|
|
}
|
|
optional<Tensor> grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
if (grad_output) {
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
|
|
}
|
|
optional<Tensor> grad_hy_value;
|
|
optional<int64_t> grad_hy_bdim;
|
|
if (grad_hy) {
|
|
std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
|
|
}
|
|
optional<Tensor> grad_cy_value;
|
|
optional<int64_t> grad_cy_bdim;
|
|
if (grad_cy) {
|
|
std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
|
|
}
|
|
optional<Tensor> dropout_state_value;
|
|
optional<int64_t> dropout_state_bdim;
|
|
if (dropout_state) {
|
|
std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
|
|
return at::_ops::mm::call(self, mat2);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor mat2_value;
|
|
optional<int64_t> mat2_bdim;
|
|
std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _int_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
|
|
return at::_ops::_int_mm::call(self, mat2);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor mat2_value;
|
|
optional<int64_t> mat2_bdim;
|
|
std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _convert_weight_to_int4pack_generated_plumbing(const at::Tensor & self, int64_t innerKTiles) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_convert_weight_to_int4pack::call(self, innerKTiles);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, innerKTiles);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _weight_int4pack_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, int64_t qGroupSize, const at::Tensor & qScaleAndZeros) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level) && !isBatchedAtLevel(qScaleAndZeros, cur_level)) {
|
|
return at::_ops::_weight_int4pack_mm::call(self, mat2, qGroupSize, qScaleAndZeros);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor mat2_value;
|
|
optional<int64_t> mat2_bdim;
|
|
std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
|
|
Tensor qScaleAndZeros_value;
|
|
optional<int64_t> qScaleAndZeros_bdim;
|
|
std::tie(qScaleAndZeros_value, qScaleAndZeros_bdim) = unwrapTensorAtLevel(qScaleAndZeros, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, qGroupSize, qScaleAndZeros_value, qScaleAndZeros_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _weight_int8pack_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, const at::Tensor & scales) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level) && !isBatchedAtLevel(scales, cur_level)) {
|
|
return at::_ops::_weight_int8pack_mm::call(self, mat2, scales);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor mat2_value;
|
|
optional<int64_t> mat2_bdim;
|
|
std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
|
|
Tensor scales_value;
|
|
optional<int64_t> scales_bdim;
|
|
std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, scales_value, scales_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_mm_generated_plumbing(const at::Tensor & sparse, const at::Tensor & dense) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(sparse, cur_level) && !isBatchedAtLevel(dense, cur_level)) {
|
|
return at::_ops::_sparse_mm::call(sparse, dense);
|
|
}
|
|
Tensor sparse_value;
|
|
optional<int64_t> sparse_bdim;
|
|
std::tie(sparse_value, sparse_bdim) = unwrapTensorAtLevel(sparse, cur_level);
|
|
Tensor dense_value;
|
|
optional<int64_t> dense_bdim;
|
|
std::tie(dense_value, dense_bdim) = unwrapTensorAtLevel(dense, cur_level);
|
|
auto results = batch_rule(sparse_value, sparse_bdim, dense_value, dense_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_mm_reduce_generated_plumbing(const at::Tensor & sparse, const at::Tensor & dense, c10::string_view reduce) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(sparse, cur_level) && !isBatchedAtLevel(dense, cur_level)) {
|
|
return at::_ops::_sparse_mm_reduce::call(sparse, dense, reduce);
|
|
}
|
|
Tensor sparse_value;
|
|
optional<int64_t> sparse_bdim;
|
|
std::tie(sparse_value, sparse_bdim) = unwrapTensorAtLevel(sparse, cur_level);
|
|
Tensor dense_value;
|
|
optional<int64_t> dense_bdim;
|
|
std::tie(dense_value, dense_bdim) = unwrapTensorAtLevel(dense, cur_level);
|
|
auto results = batch_rule(sparse_value, sparse_bdim, dense_value, dense_bdim, reduce);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_sparse_matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_sparse_sparse_matmul::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> mode_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::mode::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> mode_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::mode_dimname::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor mul_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::mul_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & mul__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::mul__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor mul_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::mul_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & mul__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::mul__Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor multiply_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::multiply_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & multiply__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::multiply__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor multiply_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::multiply_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & multiply__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::multiply__Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor mv_generated_plumbing(const at::Tensor & self, const at::Tensor & vec) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec, cur_level)) {
|
|
return at::_ops::mv::call(self, vec);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor vec_value;
|
|
optional<int64_t> vec_bdim;
|
|
std::tie(vec_value, vec_bdim) = unwrapTensorAtLevel(vec, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, vec_value, vec_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor mvlgamma_generated_plumbing(const at::Tensor & self, int64_t p) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::mvlgamma::call(self, p);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, p);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & mvlgamma__generated_plumbing(at::Tensor & self, int64_t p) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::mvlgamma_::call(self, p);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, p);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor narrow_copy_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::narrow_copy::call(self, dim, start, length);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, start, length);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor narrow_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt start, c10::SymInt length) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::narrow::call(self, dim, start, length);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, start, length);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor narrow_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & start, c10::SymInt length) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(start, cur_level)) {
|
|
return at::_ops::narrow_Tensor::call(self, dim, start, length);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor start_value;
|
|
optional<int64_t> start_bdim;
|
|
std::tie(start_value, start_bdim) = unwrapTensorAtLevel(start, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, start_value, start_bdim, length);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double momentum, double eps) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
|
|
return at::_ops::native_batch_norm::call(input, weight, bias, running_mean, running_var, training, momentum, eps);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
optional<Tensor> running_mean_value;
|
|
optional<int64_t> running_mean_bdim;
|
|
if (running_mean) {
|
|
std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
|
|
}
|
|
optional<Tensor> running_var_value;
|
|
optional<int64_t> running_var_bdim;
|
|
if (running_var) {
|
|
std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_training_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, double momentum, double eps) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
|
|
return at::_ops::_native_batch_norm_legit_no_training::call(input, weight, bias, running_mean, running_var, momentum, eps);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor running_mean_value;
|
|
optional<int64_t> running_mean_bdim;
|
|
std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean, cur_level);
|
|
Tensor running_var_value;
|
|
optional<int64_t> running_var_bdim;
|
|
std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_no_stats_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, bool training, double momentum, double eps) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::_native_batch_norm_legit_no_stats::call(input, weight, bias, training, momentum, eps);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, training, momentum, eps);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> batch_norm_stats_generated_plumbing(const at::Tensor & input, double eps) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level)) {
|
|
return at::_ops::batch_norm_stats::call(input, eps);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, eps);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor batch_norm_elemt_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & mean, const at::Tensor & invstd, double eps) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level)) {
|
|
return at::_ops::batch_norm_elemt::call(input, weight, bias, mean, invstd, eps);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor mean_value;
|
|
optional<int64_t> mean_bdim;
|
|
std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
|
|
Tensor invstd_value;
|
|
optional<int64_t> invstd_bdim;
|
|
std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, eps);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_generated_plumbing(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, int64_t count) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
|
|
return at::_ops::batch_norm_gather_stats::call(input, mean, invstd, running_mean, running_var, momentum, eps, count);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor mean_value;
|
|
optional<int64_t> mean_bdim;
|
|
std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
|
|
Tensor invstd_value;
|
|
optional<int64_t> invstd_bdim;
|
|
std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
|
|
optional<Tensor> running_mean_value;
|
|
optional<int64_t> running_mean_bdim;
|
|
if (running_mean) {
|
|
std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
|
|
}
|
|
optional<Tensor> running_var_value;
|
|
optional<int64_t> running_var_bdim;
|
|
if (running_var) {
|
|
std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps, count);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> batch_norm_gather_stats_with_counts_generated_plumbing(const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum, double eps, const at::Tensor & counts) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(counts, cur_level)) {
    return at::_ops::batch_norm_gather_stats_with_counts::call(input, mean, invstd, running_mean, running_var, momentum, eps, counts);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor mean_value;
  optional<int64_t> mean_bdim;
  std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
  Tensor invstd_value;
  optional<int64_t> invstd_bdim;
  std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
  Tensor counts_value;
  optional<int64_t> counts_bdim;
  std::tie(counts_value, counts_bdim) = unwrapTensorAtLevel(counts, cur_level);
  optional<Tensor> running_mean_value;
  optional<int64_t> running_mean_bdim;
  if (running_mean) {
    std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  optional<Tensor> running_var_value;
  optional<int64_t> running_var_bdim;
  if (running_var) {
    std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum, eps, counts_value, counts_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> native_batch_norm_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, const c10::optional<at::Tensor> & save_mean, const c10::optional<at::Tensor> & save_invstd, bool train, double eps, ::std::array<bool,3> output_mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level) && !isBatchedAtLevel(save_mean, cur_level) && !isBatchedAtLevel(save_invstd, cur_level)) {
    return at::_ops::native_batch_norm_backward::call(grad_out, input, weight, running_mean, running_var, save_mean, save_invstd, train, eps, output_mask);
  }
  Tensor grad_out_value;
  optional<int64_t> grad_out_bdim;
  std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  optional<Tensor> running_mean_value;
  optional<int64_t> running_mean_bdim;
  if (running_mean) {
    std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  optional<Tensor> running_var_value;
  optional<int64_t> running_var_bdim;
  if (running_var) {
    std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  optional<Tensor> save_mean_value;
  optional<int64_t> save_mean_bdim;
  if (save_mean) {
    std::tie(save_mean_value, save_mean_bdim) = unwrapTensorAtLevel(save_mean.value(), cur_level);
  }
  optional<Tensor> save_invstd_value;
  optional<int64_t> save_invstd_bdim;
  if (save_invstd) {
    std::tie(save_invstd_value, save_invstd_bdim) = unwrapTensorAtLevel(save_invstd.value(), cur_level);
  }
  auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, weight_value, weight_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, save_mean_value, save_mean_bdim, save_invstd_value, save_invstd_bdim, train, eps, output_mask);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> batch_norm_backward_reduce_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, bool input_g, bool weight_g, bool bias_g) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::batch_norm_backward_reduce::call(grad_out, input, mean, invstd, weight, input_g, weight_g, bias_g);
  }
  Tensor grad_out_value;
  optional<int64_t> grad_out_bdim;
  std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor mean_value;
  optional<int64_t> mean_bdim;
  std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
  Tensor invstd_value;
  optional<int64_t> invstd_bdim;
  std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, weight_value, weight_bdim, input_g, weight_g, bias_g);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor batch_norm_backward_elemt_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(invstd, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(sum_dy, cur_level) && !isBatchedAtLevel(sum_dy_xmu, cur_level) && !isBatchedAtLevel(count, cur_level)) {
    return at::_ops::batch_norm_backward_elemt::call(grad_out, input, mean, invstd, weight, sum_dy, sum_dy_xmu, count);
  }
  Tensor grad_out_value;
  optional<int64_t> grad_out_bdim;
  std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor mean_value;
  optional<int64_t> mean_bdim;
  std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
  Tensor invstd_value;
  optional<int64_t> invstd_bdim;
  std::tie(invstd_value, invstd_bdim) = unwrapTensorAtLevel(invstd, cur_level);
  Tensor sum_dy_value;
  optional<int64_t> sum_dy_bdim;
  std::tie(sum_dy_value, sum_dy_bdim) = unwrapTensorAtLevel(sum_dy, cur_level);
  Tensor sum_dy_xmu_value;
  optional<int64_t> sum_dy_xmu_bdim;
  std::tie(sum_dy_xmu_value, sum_dy_xmu_bdim) = unwrapTensorAtLevel(sum_dy_xmu, cur_level);
  Tensor count_value;
  optional<int64_t> count_bdim;
  std::tie(count_value, count_bdim) = unwrapTensorAtLevel(count, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  auto results = batch_rule(grad_out_value, grad_out_bdim, input_value, input_bdim, mean_value, mean_bdim, invstd_value, invstd_bdim, weight_value, weight_bdim, sum_dy_value, sum_dy_bdim, sum_dy_xmu_value, sum_dy_xmu_bdim, count_value, count_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> batch_norm_update_stats_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, double momentum) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
    return at::_ops::batch_norm_update_stats::call(input, running_mean, running_var, momentum);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  optional<Tensor> running_mean_value;
  optional<int64_t> running_mean_bdim;
  if (running_mean) {
    std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean.value(), cur_level);
  }
  optional<Tensor> running_var_value;
  optional<int64_t> running_var_bdim;
  if (running_var) {
    std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, momentum);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _nnpack_spatial_convolution_generated_plumbing(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::_nnpack_spatial_convolution::call(input, weight, bias, padding, stride);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, padding, stride);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor ones_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::ones_like::call(self, dtype, layout, device, pin_memory, memory_format);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor pairwise_distance_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, double eps, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
|
|
return at::_ops::pairwise_distance::call(x1, x2, p, eps, keepdim);
|
|
}
|
|
Tensor x1_value;
|
|
optional<int64_t> x1_bdim;
|
|
std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
|
|
Tensor x2_value;
|
|
optional<int64_t> x2_bdim;
|
|
std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
|
|
auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, eps, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cdist_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
|
|
return at::_ops::cdist::call(x1, x2, p, compute_mode);
|
|
}
|
|
Tensor x1_value;
|
|
optional<int64_t> x1_bdim;
|
|
std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
|
|
Tensor x2_value;
|
|
optional<int64_t> x2_bdim;
|
|
std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
|
|
auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, compute_mode);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _euclidean_dist_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
|
|
return at::_ops::_euclidean_dist::call(x1, x2);
|
|
}
|
|
Tensor x1_value;
|
|
optional<int64_t> x1_bdim;
|
|
std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
|
|
Tensor x2_value;
|
|
optional<int64_t> x2_bdim;
|
|
std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
|
|
auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _cdist_forward_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, double p, c10::optional<int64_t> compute_mode) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
|
|
return at::_ops::_cdist_forward::call(x1, x2, p, compute_mode);
|
|
}
|
|
Tensor x1_value;
|
|
optional<int64_t> x1_bdim;
|
|
std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
|
|
Tensor x2_value;
|
|
optional<int64_t> x2_bdim;
|
|
std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
|
|
auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, p, compute_mode);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _cdist_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & x1, const at::Tensor & x2, double p, const at::Tensor & cdist) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level) && !isBatchedAtLevel(cdist, cur_level)) {
|
|
return at::_ops::_cdist_backward::call(grad, x1, x2, p, cdist);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor x1_value;
|
|
optional<int64_t> x1_bdim;
|
|
std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
|
|
Tensor x2_value;
|
|
optional<int64_t> x2_bdim;
|
|
std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
|
|
Tensor cdist_value;
|
|
optional<int64_t> cdist_bdim;
|
|
std::tie(cdist_value, cdist_bdim) = unwrapTensorAtLevel(cdist, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, x1_value, x1_bdim, x2_value, x2_bdim, p, cdist_value, cdist_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor pdist_generated_plumbing(const at::Tensor & self, double p) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::pdist::call(self, p);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, p);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _pdist_forward_generated_plumbing(const at::Tensor & self, double p) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_pdist_forward::call(self, p);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, p);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _pdist_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, double p, const at::Tensor & pdist) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(pdist, cur_level)) {
|
|
return at::_ops::_pdist_backward::call(grad, self, p, pdist);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor pdist_value;
|
|
optional<int64_t> pdist_bdim;
|
|
std::tie(pdist_value, pdist_bdim) = unwrapTensorAtLevel(pdist, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, p, pdist_value, pdist_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cosine_similarity_generated_plumbing(const at::Tensor & x1, const at::Tensor & x2, int64_t dim, double eps) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(x1, cur_level) && !isBatchedAtLevel(x2, cur_level)) {
|
|
return at::_ops::cosine_similarity::call(x1, x2, dim, eps);
|
|
}
|
|
Tensor x1_value;
|
|
optional<int64_t> x1_bdim;
|
|
std::tie(x1_value, x1_bdim) = unwrapTensorAtLevel(x1, cur_level);
|
|
Tensor x2_value;
|
|
optional<int64_t> x2_bdim;
|
|
std::tie(x2_value, x2_bdim) = unwrapTensorAtLevel(x2, cur_level);
|
|
auto results = batch_rule(x1_value, x1_bdim, x2_value, x2_bdim, dim, eps);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor permute_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::permute::call(self, dims);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dims);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor movedim_intlist_generated_plumbing(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::movedim_intlist::call(self, source, destination);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, source, destination);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor movedim_int_generated_plumbing(const at::Tensor & self, int64_t source, int64_t destination) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::movedim_int::call(self, source, destination);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, source, destination);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor moveaxis_intlist_generated_plumbing(const at::Tensor & self, at::IntArrayRef source, at::IntArrayRef destination) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::moveaxis_intlist::call(self, source, destination);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, source, destination);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor moveaxis_int_generated_plumbing(const at::Tensor & self, int64_t source, int64_t destination) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::moveaxis_int::call(self, source, destination);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, source, destination);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor numpy_T_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::numpy_T::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor matrix_H_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::matrix_H::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor mT_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::mT::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor mH_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::mH::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor adjoint_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::adjoint::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor pixel_shuffle_generated_plumbing(const at::Tensor & self, int64_t upscale_factor) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::pixel_shuffle::call(self, upscale_factor);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, upscale_factor);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor pixel_unshuffle_generated_plumbing(const at::Tensor & self, int64_t downscale_factor) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::pixel_unshuffle::call(self, downscale_factor);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, downscale_factor);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor channel_shuffle_generated_plumbing(const at::Tensor & self, c10::SymInt groups) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::channel_shuffle::call(self, groups);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, groups);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor native_channel_shuffle_generated_plumbing(const at::Tensor & self, c10::SymInt groups) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::native_channel_shuffle::call(self, groups);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, groups);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor pin_memory_generated_plumbing(const at::Tensor & self, c10::optional<at::Device> device) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::pin_memory::call(self, device);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, device);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _pin_memory_generated_plumbing(const at::Tensor & self, c10::optional<at::Device> device) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_pin_memory::call(self, device);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, device);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor pinverse_generated_plumbing(const at::Tensor & self, double rcond) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::pinverse::call(self, rcond);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, rcond);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor poisson_nll_loss_generated_plumbing(const at::Tensor & input, const at::Tensor & target, bool log_input, bool full, double eps, int64_t reduction) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(target, cur_level)) {
|
|
return at::_ops::poisson_nll_loss::call(input, target, log_input, full, eps, reduction);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, target_value, target_bdim, log_input, full, eps, reduction);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor rad2deg_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::rad2deg::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
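// In-place wrappers such as rad2deg_ below use the "gen_vmap_inplace_plumbing"
// variant of the pattern: the batch_rule is called only for its side effect on
// the unwrapped self_value, and the original `self` handle is returned, since
// the batched wrapper already reflects the in-place update of the underlying
// tensor it holds.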
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & rad2deg__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::rad2deg_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor deg2rad_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::deg2rad::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & deg2rad__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::deg2rad_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor rand_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::rand_like::call(self, dtype, layout, device, pin_memory, memory_format);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor randint_like_generated_plumbing(const at::Tensor & self, c10::SymInt high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::randint_like::call(self, high, dtype, layout, device, pin_memory, memory_format);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, high, dtype, layout, device, pin_memory, memory_format);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor randint_like_low_dtype_generated_plumbing(const at::Tensor & self, c10::SymInt low, c10::SymInt high, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::randint_like_low_dtype::call(self, low, high, dtype, layout, device, pin_memory, memory_format);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, low, high, dtype, layout, device, pin_memory, memory_format);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor randn_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::randn_like::call(self, dtype, layout, device, pin_memory, memory_format);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor ravel_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::ravel::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor reciprocal_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::reciprocal::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & reciprocal__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::reciprocal_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor neg_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::neg::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & neg__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::neg_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor negative_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::negative::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & negative__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::negative_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor repeat_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef repeats) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::repeat::call(self, repeats);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, repeats);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor repeat_interleave_Tensor_generated_plumbing(const at::Tensor & repeats, c10::optional<c10::SymInt> output_size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(repeats, cur_level)) {
|
|
return at::_ops::repeat_interleave_Tensor::call(repeats, output_size);
|
|
}
|
|
Tensor repeats_value;
|
|
optional<int64_t> repeats_bdim;
|
|
std::tie(repeats_value, repeats_bdim) = unwrapTensorAtLevel(repeats, cur_level);
|
|
auto results = batch_rule(repeats_value, repeats_bdim, output_size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor repeat_interleave_self_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & repeats, c10::optional<int64_t> dim, c10::optional<c10::SymInt> output_size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(repeats, cur_level)) {
|
|
return at::_ops::repeat_interleave_self_Tensor::call(self, repeats, dim, output_size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor repeats_value;
|
|
optional<int64_t> repeats_bdim;
|
|
std::tie(repeats_value, repeats_bdim) = unwrapTensorAtLevel(repeats, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, repeats_value, repeats_bdim, dim, output_size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor repeat_interleave_self_int_generated_plumbing(const at::Tensor & self, c10::SymInt repeats, c10::optional<int64_t> dim, c10::optional<c10::SymInt> output_size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::repeat_interleave_self_int::call(self, repeats, dim, output_size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, repeats, dim, output_size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor reshape_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef shape) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::reshape::call(self, shape);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, shape);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _reshape_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_reshape_copy::call(self, size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _reshape_alias_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_reshape_alias::call(self, size, stride);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, stride);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _mkldnn_reshape_generated_plumbing(const at::Tensor & self, at::IntArrayRef shape) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_mkldnn_reshape::call(self, shape);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, shape);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor reshape_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::reshape_as::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor round_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::round::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
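// In-place variants (checked as "gen_vmap_inplace_plumbing") do not re-wrap a result:
// the batch rule is called for its in-place effect on the unwrapped self_value, and the
// original `self` reference is returned.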
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & round__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::round_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor round_decimals_generated_plumbing(const at::Tensor & self, int64_t decimals) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::round_decimals::call(self, decimals);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, decimals);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & round__decimals_generated_plumbing(at::Tensor & self, int64_t decimals) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::round__decimals::call(self, decimals);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, decimals);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rrelu_generated_plumbing(const at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rrelu::call(self, lower, upper, training, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, lower, upper, training, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & rrelu__generated_plumbing(at::Tensor & self, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rrelu_::call(self, lower, upper, training, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, lower, upper, training, generator);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor relu_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::relu::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & relu__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::relu_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor relu6_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::relu6::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & relu6__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::relu6_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor prelu_generated_plumbing(const at::Tensor & self, const at::Tensor & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::prelu::call(self, weight);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _prelu_kernel_generated_plumbing(const at::Tensor & self, const at::Tensor & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::_prelu_kernel::call(self, weight);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
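// Ops that return multiple tensors re-wrap each (value, bdim) pair produced by the
// batch rule individually and pack the re-wrapped tensors into a std::tuple.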
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _prelu_kernel_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
    return at::_ops::_prelu_kernel_backward::call(grad_output, self, weight);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor weight_value;
  optional<int64_t> weight_bdim;
  std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, weight_value, weight_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & gelu__generated_plumbing(at::Tensor & self, c10::string_view approximate) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::gelu_::call(self, approximate);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, approximate);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor gelu_generated_plumbing(const at::Tensor & self, c10::string_view approximate) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::gelu::call(self, approximate);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, approximate);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor gelu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::gelu_backward::call(grad_output, self, approximate);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, approximate);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor infinitely_differentiable_gelu_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::infinitely_differentiable_gelu_backward::call(grad, self);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor hardshrink_generated_plumbing(const at::Tensor & self, const at::Scalar & lambd) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hardshrink::call(self, lambd);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, lambd);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor hardshrink_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & self, const at::Scalar & lambd) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hardshrink_backward::call(grad_out, self, lambd);
  }
  Tensor grad_out_value;
  optional<int64_t> grad_out_bdim;
  std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_out_value, grad_out_bdim, self_value, self_bdim, lambd);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rsqrt_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rsqrt::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & rsqrt__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::rsqrt_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor select_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, int64_t index) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::select_Dimname::call(self, dim, index);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor select_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt index) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::select_int::call(self, dim, index);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor select_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::select_backward::call(grad_output, input_sizes, dim, index);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, dim, index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_select_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim, c10::SymInt index) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_nested_select_backward::call(grad_output, self, dim, index);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, dim, index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor selu_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::selu::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & selu__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::selu_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor celu_generated_plumbing(const at::Tensor & self, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::celu::call(self, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & celu__generated_plumbing(at::Tensor & self, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::celu_::call(self, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor silu_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::silu::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & silu__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::silu_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor silu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::silu_backward::call(grad_output, self);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mish_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::mish::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & mish__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::mish_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mish_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
    return at::_ops::mish_backward::call(grad_output, self);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sigmoid_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sigmoid::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & sigmoid__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sigmoid_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor logit_generated_plumbing(const at::Tensor & self, c10::optional<double> eps) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::logit::call(self, eps);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, eps);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & logit__generated_plumbing(at::Tensor & self, c10::optional<double> eps) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::logit_::call(self, eps);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, eps);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sin_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sin::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & sin__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sin_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sinc_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sinc::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & sinc__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sinc_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sinh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sinh::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & sinh__generated_plumbing(at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sinh_::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor detach_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::detach::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor slice_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::slice_Tensor::call(self, dim, start, end, step);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, start, end, step);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor slice_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt start, c10::SymInt end, c10::SymInt step) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level)) {
    return at::_ops::slice_backward::call(grad_output, input_sizes, dim, start, end, step);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, input_sizes, dim, start, end, step);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor slice_inverse_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::slice_inverse::call(self, src, dim, start, end, step);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, start, end, step);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor slice_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::slice_scatter::call(self, src, dim, start, end, step);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, start, end, step);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor select_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::SymInt index) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::select_scatter::call(self, src, dim, index);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, dim, index);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor diagonal_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, int64_t offset, int64_t dim1, int64_t dim2) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::diagonal_scatter::call(self, src, offset, dim1, dim2);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, offset, dim1, dim2);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor as_strided_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & src, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::as_strided_scatter::call(self, src, size, stride, storage_offset);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, size, stride, storage_offset);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor smm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
    return at::_ops::smm::call(self, mat2);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mat2_value;
  optional<int64_t> mat2_bdim;
  std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
  auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::softmax_int::call(self, dim, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::softmax_Dimname::call(self, dim, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_softmax::call(self, dim, half_to_float);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
    return at::_ops::_softmax_backward_data::call(grad_output, output, dim, input_dtype);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor output_value;
  optional<int64_t> output_bdim;
  std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, input_dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
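// Ops returning ::std::vector<at::Tensor> (the split family below) use
// makeBatchedVector instead of makeBatched so that every element of the result
// list is re-wrapped at the current level.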
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> unsafe_split_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::unsafe_split_Tensor::call(self, split_size, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, split_size, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> split_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::split_Tensor::call(self, split_size, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, split_size, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> split_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_size, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::split_sizes::call(self, split_size, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, split_size, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> unsafe_split_with_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::unsafe_split_with_sizes::call(self, split_sizes, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, split_sizes, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> split_with_sizes_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::split_with_sizes::call(self, split_sizes, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, split_sizes, dim);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> hsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hsplit_int::call(self, sections);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, sections);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> hsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::hsplit_array::call(self, indices);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, indices);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> vsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::vsplit_int::call(self, sections);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, sections);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> vsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::vsplit_array::call(self, indices);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, indices);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> dsplit_int_generated_plumbing(const at::Tensor & self, int64_t sections) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::dsplit_int::call(self, sections);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, sections);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> dsplit_array_generated_plumbing(const at::Tensor & self, at::IntArrayRef indices) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::dsplit_array::call(self, indices);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, indices);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor squeeze_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::squeeze::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor squeeze_dim_generated_plumbing(const at::Tensor & self, int64_t dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::squeeze_dim::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor squeeze_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::squeeze_dimname::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor squeeze_dims_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::squeeze_dims::call(self, dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sspaddmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
|
|
return at::_ops::sspaddmm::call(self, mat1, mat2, beta, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor mat1_value;
|
|
optional<int64_t> mat1_bdim;
|
|
std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
|
|
Tensor mat2_value;
|
|
optional<int64_t> mat2_bdim;
|
|
std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _chunk_cat_generated_plumbing(at::TensorList tensors, int64_t dim, int64_t num_chunks) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::_chunk_cat::call(tensors, dim, num_chunks);
|
|
}
|
|
|
|
auto results = batch_rule(tensors, dim, num_chunks);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor stack_generated_plumbing(at::TensorList tensors, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::stack::call(tensors, dim);
|
|
}
|
|
|
|
auto results = batch_rule(tensors, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _stack_generated_plumbing(at::TensorList tensors, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::_stack::call(tensors, dim);
|
|
}
|
|
|
|
auto results = batch_rule(tensors, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor hstack_generated_plumbing(at::TensorList tensors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::hstack::call(tensors);
|
|
}
|
|
|
|
auto results = batch_rule(tensors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor vstack_generated_plumbing(at::TensorList tensors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::vstack::call(tensors);
|
|
}
|
|
|
|
auto results = batch_rule(tensors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor dstack_generated_plumbing(at::TensorList tensors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::dstack::call(tensors);
|
|
}
|
|
|
|
auto results = batch_rule(tensors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor stft_generated_plumbing(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
|
|
return at::_ops::stft::call(self, n_fft, hop_length, win_length, window, normalized, onesided, return_complex);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
optional<Tensor> window_value;
|
|
optional<int64_t> window_bdim;
|
|
if (window) {
|
|
std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, normalized, onesided, return_complex);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor stft_center_generated_plumbing(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, c10::string_view pad_mode, bool normalized, c10::optional<bool> onesided, c10::optional<bool> return_complex) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
|
|
return at::_ops::stft_center::call(self, n_fft, hop_length, win_length, window, center, pad_mode, normalized, onesided, return_complex);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
optional<Tensor> window_value;
|
|
optional<int64_t> window_bdim;
|
|
if (window) {
|
|
std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, center, pad_mode, normalized, onesided, return_complex);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor istft_generated_plumbing(const at::Tensor & self, int64_t n_fft, c10::optional<int64_t> hop_length, c10::optional<int64_t> win_length, const c10::optional<at::Tensor> & window, bool center, bool normalized, c10::optional<bool> onesided, c10::optional<int64_t> length, bool return_complex) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(window, cur_level)) {
|
|
return at::_ops::istft::call(self, n_fft, hop_length, win_length, window, center, normalized, onesided, length, return_complex);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
optional<Tensor> window_value;
|
|
optional<int64_t> window_bdim;
|
|
if (window) {
|
|
std::tie(window_value, window_bdim) = unwrapTensorAtLevel(window.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, n_fft, hop_length, win_length, window_value, window_bdim, center, normalized, onesided, length, return_complex);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sum_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::sum::call(self, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sum_dim_IntList_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::sum_dim_IntList::call(self, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sum_dim_DimnameList_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::sum_dim_DimnameList::call(self, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _nested_sum_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_nested_sum_backward::call(grad, self, dim, keepdim);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor nansum_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::nansum::call(self, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sum_to_size_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::sum_to_size::call(self, size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sqrt_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::sqrt::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & sqrt__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::sqrt_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor square_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::square::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & square__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::square_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor std_generated_plumbing(const at::Tensor & self, bool unbiased) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::std::call(self, unbiased);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, unbiased);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor std_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::std_dim::call(self, dim, unbiased, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor std_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::std_correction::call(self, dim, correction, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> std_mean_generated_plumbing(const at::Tensor & self, bool unbiased) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::std_mean::call(self, unbiased);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, unbiased);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> std_mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::std_mean_dim::call(self, dim, unbiased, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> std_mean_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::std_mean_correction::call(self, dim, correction, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> std_mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::std_mean_names_dim::call(self, dim, unbiased, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> std_mean_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::std_mean_correction_names::call(self, dim, correction, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor std_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::std_names_dim::call(self, dim, unbiased, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor std_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::std_correction_names::call(self, dim, correction, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor prod_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::prod::call(self, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor prod_dim_int_generated_plumbing(const at::Tensor & self, int64_t dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::prod_dim_int::call(self, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor prod_dim_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::prod_dim_Dimname::call(self, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor t_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::t::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor tan_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::tan::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & tan__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::tan_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor tanh_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::tanh::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & tanh__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::tanh_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor tensordot_generated_plumbing(const at::Tensor & self, const at::Tensor & other, at::IntArrayRef dims_self, at::IntArrayRef dims_other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::tensordot::call(self, other, dims_self, dims_other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dims_self, dims_other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor threshold_generated_plumbing(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::threshold::call(self, threshold, value);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, threshold, value);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & threshold__generated_plumbing(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::threshold_::call(self, threshold, value);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, threshold, value);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor threshold_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & threshold) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::threshold_backward::call(grad_output, self, threshold);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, threshold);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor tile_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef dims) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::tile::call(self, dims);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dims);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor transpose_int_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::transpose_int::call(self, dim0, dim1);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim0, dim1);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor transpose_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim0, at::Dimname dim1) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::transpose_Dimname::call(self, dim0, dim1);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim0, dim1);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _mkldnn_transpose_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_mkldnn_transpose::call(self, dim0, dim1);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim0, dim1);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & _mkldnn_transpose__generated_plumbing(at::Tensor & self, int64_t dim0, int64_t dim1) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_mkldnn_transpose_::call(self, dim0, dim1);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, dim0, dim1);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor one_hot_generated_plumbing(const at::Tensor & self, int64_t num_classes) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::one_hot::call(self, num_classes);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, num_classes);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor flip_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::flip::call(self, dims);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dims);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fliplr_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fliplr::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor flipud_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::flipud::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor roll_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::roll::call(self, shifts, dims);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, shifts, dims);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor rot90_generated_plumbing(const at::Tensor & self, int64_t k, at::IntArrayRef dims) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::rot90::call(self, k, dims);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, k, dims);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor trapezoid_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) {
|
|
return at::_ops::trapezoid_x::call(y, x, dim);
|
|
}
|
|
Tensor y_value;
|
|
optional<int64_t> y_bdim;
|
|
std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
|
|
Tensor x_value;
|
|
optional<int64_t> x_bdim;
|
|
std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
|
|
auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor trapezoid_dx_generated_plumbing(const at::Tensor & y, const at::Scalar & dx, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(y, cur_level)) {
|
|
return at::_ops::trapezoid_dx::call(y, dx, dim);
|
|
}
|
|
Tensor y_value;
|
|
optional<int64_t> y_bdim;
|
|
std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
|
|
auto results = batch_rule(y_value, y_bdim, dx, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor trapz_x_generated_plumbing(const at::Tensor & y, const at::Tensor & x, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(y, cur_level) && !isBatchedAtLevel(x, cur_level)) {
|
|
return at::_ops::trapz_x::call(y, x, dim);
|
|
}
|
|
Tensor y_value;
|
|
optional<int64_t> y_bdim;
|
|
std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
|
|
Tensor x_value;
|
|
optional<int64_t> x_bdim;
|
|
std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
|
|
auto results = batch_rule(y_value, y_bdim, x_value, x_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor trapz_dx_generated_plumbing(const at::Tensor & y, double dx, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(y, cur_level)) {
|
|
return at::_ops::trapz_dx::call(y, dx, dim);
|
|
}
|
|
Tensor y_value;
|
|
optional<int64_t> y_bdim;
|
|
std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
|
|
auto results = batch_rule(y_value, y_bdim, dx, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _transform_bias_rescale_qkv_generated_plumbing(const at::Tensor & qkv, const at::Tensor & qkv_bias, int64_t num_heads) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(qkv, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level)) {
|
|
return at::_ops::_transform_bias_rescale_qkv::call(qkv, qkv_bias, num_heads);
|
|
}
|
|
Tensor qkv_value;
|
|
optional<int64_t> qkv_bdim;
|
|
std::tie(qkv_value, qkv_bdim) = unwrapTensorAtLevel(qkv, cur_level);
|
|
Tensor qkv_bias_value;
|
|
optional<int64_t> qkv_bias_bdim;
|
|
std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
|
|
auto results = batch_rule(qkv_value, qkv_bdim, qkv_bias_value, qkv_bias_bdim, num_heads);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _nested_tensor_from_mask_generated_plumbing(const at::Tensor & t, const at::Tensor & mask, bool mask_check) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(t, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
|
|
return at::_ops::_nested_tensor_from_mask::call(t, mask, mask_check);
|
|
}
|
|
Tensor t_value;
|
|
optional<int64_t> t_bdim;
|
|
std::tie(t_value, t_bdim) = unwrapTensorAtLevel(t, cur_level);
|
|
Tensor mask_value;
|
|
optional<int64_t> mask_bdim;
|
|
std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
|
|
auto results = batch_rule(t_value, t_bdim, mask_value, mask_bdim, mask_check);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _nested_from_padded_generated_plumbing(const at::Tensor & padded, const at::Tensor & cpu_nested_shape_example, bool fuse_transform_0213) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(padded, cur_level) && !isBatchedAtLevel(cpu_nested_shape_example, cur_level)) {
|
|
return at::_ops::_nested_from_padded::call(padded, cpu_nested_shape_example, fuse_transform_0213);
|
|
}
|
|
Tensor padded_value;
|
|
optional<int64_t> padded_bdim;
|
|
std::tie(padded_value, padded_bdim) = unwrapTensorAtLevel(padded, cur_level);
|
|
Tensor cpu_nested_shape_example_value;
|
|
optional<int64_t> cpu_nested_shape_example_bdim;
|
|
std::tie(cpu_nested_shape_example_value, cpu_nested_shape_example_bdim) = unwrapTensorAtLevel(cpu_nested_shape_example, cur_level);
|
|
auto results = batch_rule(padded_value, padded_bdim, cpu_nested_shape_example_value, cpu_nested_shape_example_bdim, fuse_transform_0213);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _nested_tensor_size_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_nested_tensor_size::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _nested_tensor_strides_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_nested_tensor_strides::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _nested_tensor_storage_offsets_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_nested_tensor_storage_offsets::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _nested_from_padded_and_nested_example_generated_plumbing(const at::Tensor & padded, const at::Tensor & nt_example) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(padded, cur_level) && !isBatchedAtLevel(nt_example, cur_level)) {
return at::_ops::_nested_from_padded_and_nested_example::call(padded, nt_example);
}
Tensor padded_value;
optional<int64_t> padded_bdim;
std::tie(padded_value, padded_bdim) = unwrapTensorAtLevel(padded, cur_level);
Tensor nt_example_value;
optional<int64_t> nt_example_bdim;
std::tie(nt_example_value, nt_example_bdim) = unwrapTensorAtLevel(nt_example, cur_level);
auto results = batch_rule(padded_value, padded_bdim, nt_example_value, nt_example_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_view_from_buffer_generated_plumbing(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(nested_size, cur_level) && !isBatchedAtLevel(nested_strides, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
return at::_ops::_nested_view_from_buffer::call(self, nested_size, nested_strides, offsets);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor nested_size_value;
optional<int64_t> nested_size_bdim;
std::tie(nested_size_value, nested_size_bdim) = unwrapTensorAtLevel(nested_size, cur_level);
Tensor nested_strides_value;
optional<int64_t> nested_strides_bdim;
std::tie(nested_strides_value, nested_strides_bdim) = unwrapTensorAtLevel(nested_strides, cur_level);
Tensor offsets_value;
optional<int64_t> offsets_bdim;
std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
auto results = batch_rule(self_value, self_bdim, nested_size_value, nested_size_bdim, nested_strides_value, nested_strides_bdim, offsets_value, offsets_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_view_from_buffer_copy_generated_plumbing(const at::Tensor & self, const at::Tensor & nested_size, const at::Tensor & nested_strides, const at::Tensor & offsets) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(nested_size, cur_level) && !isBatchedAtLevel(nested_strides, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
return at::_ops::_nested_view_from_buffer_copy::call(self, nested_size, nested_strides, offsets);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor nested_size_value;
optional<int64_t> nested_size_bdim;
std::tie(nested_size_value, nested_size_bdim) = unwrapTensorAtLevel(nested_size, cur_level);
Tensor nested_strides_value;
optional<int64_t> nested_strides_bdim;
std::tie(nested_strides_value, nested_strides_bdim) = unwrapTensorAtLevel(nested_strides, cur_level);
Tensor offsets_value;
optional<int64_t> offsets_bdim;
std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
auto results = batch_rule(self_value, self_bdim, nested_size_value, nested_size_bdim, nested_strides_value, nested_strides_bdim, offsets_value, offsets_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_view_from_jagged_generated_plumbing(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional<at::Tensor> & lengths, int64_t ragged_idx) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(dummy, cur_level) && !isBatchedAtLevel(lengths, cur_level)) {
return at::_ops::_nested_view_from_jagged::call(self, offsets, dummy, lengths, ragged_idx);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor offsets_value;
optional<int64_t> offsets_bdim;
std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
Tensor dummy_value;
optional<int64_t> dummy_bdim;
std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level);
optional<Tensor> lengths_value;
optional<int64_t> lengths_bdim;
if (lengths) {
std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level);
}
auto results = batch_rule(self_value, self_bdim, offsets_value, offsets_bdim, dummy_value, dummy_bdim, lengths_value, lengths_bdim, ragged_idx);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_view_from_jagged_copy_generated_plumbing(const at::Tensor & self, const at::Tensor & offsets, const at::Tensor & dummy, const c10::optional<at::Tensor> & lengths, int64_t ragged_idx) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(offsets, cur_level) && !isBatchedAtLevel(dummy, cur_level) && !isBatchedAtLevel(lengths, cur_level)) {
return at::_ops::_nested_view_from_jagged_copy::call(self, offsets, dummy, lengths, ragged_idx);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor offsets_value;
optional<int64_t> offsets_bdim;
std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
Tensor dummy_value;
optional<int64_t> dummy_bdim;
std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level);
optional<Tensor> lengths_value;
optional<int64_t> lengths_bdim;
if (lengths) {
std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level);
}
auto results = batch_rule(self_value, self_bdim, offsets_value, offsets_bdim, dummy_value, dummy_bdim, lengths_value, lengths_bdim, ragged_idx);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_get_values_generated_plumbing(const at::Tensor & self) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::_nested_get_values::call(self);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_get_values_copy_generated_plumbing(const at::Tensor & self) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::_nested_get_values_copy::call(self);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_get_offsets_generated_plumbing(const at::Tensor & self) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::_nested_get_offsets::call(self);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_get_lengths_generated_plumbing(const at::Tensor & self) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level)) {
return at::_ops::_nested_get_lengths::call(self);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
auto results = batch_rule(self_value, self_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _nested_get_jagged_dummy_generated_plumbing(const at::Tensor & any) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(any, cur_level)) {
return at::_ops::_nested_get_jagged_dummy::call(any);
}
Tensor any_value;
optional<int64_t> any_bdim;
std::tie(any_value, any_bdim) = unwrapTensorAtLevel(any, cur_level);
auto results = batch_rule(any_value, any_bdim);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _trilinear_generated_plumbing(const at::Tensor & i1, const at::Tensor & i2, const at::Tensor & i3, at::IntArrayRef expand1, at::IntArrayRef expand2, at::IntArrayRef expand3, at::IntArrayRef sumdim, int64_t unroll_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(i1, cur_level) && !isBatchedAtLevel(i2, cur_level) && !isBatchedAtLevel(i3, cur_level)) {
|
|
return at::_ops::_trilinear::call(i1, i2, i3, expand1, expand2, expand3, sumdim, unroll_dim);
|
|
}
|
|
Tensor i1_value;
|
|
optional<int64_t> i1_bdim;
|
|
std::tie(i1_value, i1_bdim) = unwrapTensorAtLevel(i1, cur_level);
|
|
Tensor i2_value;
|
|
optional<int64_t> i2_bdim;
|
|
std::tie(i2_value, i2_bdim) = unwrapTensorAtLevel(i2, cur_level);
|
|
Tensor i3_value;
|
|
optional<int64_t> i3_bdim;
|
|
std::tie(i3_value, i3_bdim) = unwrapTensorAtLevel(i3, cur_level);
|
|
auto results = batch_rule(i1_value, i1_bdim, i2_value, i2_bdim, i3_value, i3_bdim, expand1, expand2, expand3, sumdim, unroll_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor triplet_margin_loss_generated_plumbing(const at::Tensor & anchor, const at::Tensor & positive, const at::Tensor & negative, double margin, double p, double eps, bool swap, int64_t reduction) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(anchor, cur_level) && !isBatchedAtLevel(positive, cur_level) && !isBatchedAtLevel(negative, cur_level)) {
|
|
return at::_ops::triplet_margin_loss::call(anchor, positive, negative, margin, p, eps, swap, reduction);
|
|
}
|
|
Tensor anchor_value;
|
|
optional<int64_t> anchor_bdim;
|
|
std::tie(anchor_value, anchor_bdim) = unwrapTensorAtLevel(anchor, cur_level);
|
|
Tensor positive_value;
|
|
optional<int64_t> positive_bdim;
|
|
std::tie(positive_value, positive_bdim) = unwrapTensorAtLevel(positive, cur_level);
|
|
Tensor negative_value;
|
|
optional<int64_t> negative_bdim;
|
|
std::tie(negative_value, negative_bdim) = unwrapTensorAtLevel(negative, cur_level);
|
|
auto results = batch_rule(anchor_value, anchor_bdim, positive_value, positive_bdim, negative_value, negative_bdim, margin, p, eps, swap, reduction);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor trunc_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::trunc::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & trunc__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::trunc_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fix_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fix::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & fix__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fix_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor type_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::type_as::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _unique_generated_plumbing(const at::Tensor & self, bool sorted, bool return_inverse) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_unique::call(self, sorted, return_inverse);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, sorted, return_inverse);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_generated_plumbing(const at::Tensor & self, int64_t dim, bool sorted, bool return_inverse, bool return_counts) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::unique_dim::call(self, dim, sorted, return_inverse, return_counts);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, sorted, return_inverse, return_counts);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_consecutive_generated_plumbing(const at::Tensor & self, bool return_inverse, bool return_counts, c10::optional<int64_t> dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::unique_consecutive::call(self, return_inverse, return_counts, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, return_inverse, return_counts, dim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> unique_dim_consecutive_generated_plumbing(const at::Tensor & self, int64_t dim, bool return_inverse, bool return_counts) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::unique_dim_consecutive::call(self, dim, return_inverse, return_counts);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, return_inverse, return_counts);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _unique2_generated_plumbing(const at::Tensor & self, bool sorted, bool return_inverse, bool return_counts) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_unique2::call(self, sorted, return_inverse, return_counts);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, sorted, return_inverse, return_counts);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _unsafe_view_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_unsafe_view::call(self, size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor unsqueeze_generated_plumbing(const at::Tensor & self, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::unsqueeze::call(self, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor vander_generated_plumbing(const at::Tensor & x, c10::optional<int64_t> N, bool increasing) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(x, cur_level)) {
|
|
return at::_ops::vander::call(x, N, increasing);
|
|
}
|
|
Tensor x_value;
|
|
optional<int64_t> x_bdim;
|
|
std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
|
|
auto results = batch_rule(x_value, x_bdim, N, increasing);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor var_generated_plumbing(const at::Tensor & self, bool unbiased) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::var::call(self, unbiased);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, unbiased);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor var_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::var_dim::call(self, dim, unbiased, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor var_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::var_correction::call(self, dim, correction, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor var_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::var_names_dim::call(self, dim, unbiased, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor var_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::var_correction_names::call(self, dim, correction, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> var_mean_generated_plumbing(const at::Tensor & self, bool unbiased) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::var_mean::call(self, unbiased);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, unbiased);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> var_mean_dim_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, bool unbiased, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::var_mean_dim::call(self, dim, unbiased, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> var_mean_correction_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim, const c10::optional<at::Scalar> & correction, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::var_mean_correction::call(self, dim, correction, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> var_mean_names_dim_generated_plumbing(const at::Tensor & self, at::DimnameList dim, bool unbiased, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::var_mean_names_dim::call(self, dim, unbiased, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, unbiased, keepdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> var_mean_correction_names_generated_plumbing(const at::Tensor & self, at::DimnameList dim, const c10::optional<at::Scalar> & correction, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::var_mean_correction_names::call(self, dim, correction, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, correction, keepdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor view_as_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::view_as::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor where_self_generated_plumbing(const at::Tensor & condition, const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::where_self::call(condition, self, other);
|
|
}
|
|
Tensor condition_value;
|
|
optional<int64_t> condition_bdim;
|
|
std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(condition_value, condition_bdim, self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor where_ScalarSelf_generated_plumbing(const at::Tensor & condition, const at::Scalar & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::where_ScalarSelf::call(condition, self, other);
|
|
}
|
|
Tensor condition_value;
|
|
optional<int64_t> condition_bdim;
|
|
std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(condition_value, condition_bdim, self, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor where_ScalarOther_generated_plumbing(const at::Tensor & condition, const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(condition, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::where_ScalarOther::call(condition, self, other);
|
|
}
|
|
Tensor condition_value;
|
|
optional<int64_t> condition_bdim;
|
|
std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(condition_value, condition_bdim, self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor where_Scalar_generated_plumbing(const at::Tensor & condition, const at::Scalar & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(condition, cur_level)) {
|
|
return at::_ops::where_Scalar::call(condition, self, other);
|
|
}
|
|
Tensor condition_value;
|
|
optional<int64_t> condition_bdim;
|
|
std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
|
|
auto results = batch_rule(condition_value, condition_bdim, self, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> where_generated_plumbing(const at::Tensor & condition) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(condition, cur_level)) {
|
|
return at::_ops::where::call(condition);
|
|
}
|
|
Tensor condition_value;
|
|
optional<int64_t> condition_bdim;
|
|
std::tie(condition_value, condition_bdim) = unwrapTensorAtLevel(condition, cur_level);
|
|
auto results = batch_rule(condition_value, condition_bdim);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor norm_except_dim_generated_plumbing(const at::Tensor & v, int64_t pow, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(v, cur_level)) {
|
|
return at::_ops::norm_except_dim::call(v, pow, dim);
|
|
}
|
|
Tensor v_value;
|
|
optional<int64_t> v_bdim;
|
|
std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level);
|
|
auto results = batch_rule(v_value, v_bdim, pow, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _weight_norm_generated_plumbing(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(v, cur_level) && !isBatchedAtLevel(g, cur_level)) {
|
|
return at::_ops::_weight_norm::call(v, g, dim);
|
|
}
|
|
Tensor v_value;
|
|
optional<int64_t> v_bdim;
|
|
std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level);
|
|
Tensor g_value;
|
|
optional<int64_t> g_bdim;
|
|
std::tie(g_value, g_bdim) = unwrapTensorAtLevel(g, cur_level);
|
|
auto results = batch_rule(v_value, v_bdim, g_value, g_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_generated_plumbing(const at::Tensor & v, const at::Tensor & g, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(v, cur_level) && !isBatchedAtLevel(g, cur_level)) {
|
|
return at::_ops::_weight_norm_interface::call(v, g, dim);
|
|
}
|
|
Tensor v_value;
|
|
optional<int64_t> v_bdim;
|
|
std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level);
|
|
Tensor g_value;
|
|
optional<int64_t> g_bdim;
|
|
std::tie(g_value, g_bdim) = unwrapTensorAtLevel(g, cur_level);
|
|
auto results = batch_rule(v_value, v_bdim, g_value, g_bdim, dim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _weight_norm_interface_backward_generated_plumbing(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_w, cur_level) && !isBatchedAtLevel(saved_v, cur_level) && !isBatchedAtLevel(saved_g, cur_level) && !isBatchedAtLevel(saved_norms, cur_level)) {
|
|
return at::_ops::_weight_norm_interface_backward::call(grad_w, saved_v, saved_g, saved_norms, dim);
|
|
}
|
|
Tensor grad_w_value;
|
|
optional<int64_t> grad_w_bdim;
|
|
std::tie(grad_w_value, grad_w_bdim) = unwrapTensorAtLevel(grad_w, cur_level);
|
|
Tensor saved_v_value;
|
|
optional<int64_t> saved_v_bdim;
|
|
std::tie(saved_v_value, saved_v_bdim) = unwrapTensorAtLevel(saved_v, cur_level);
|
|
Tensor saved_g_value;
|
|
optional<int64_t> saved_g_bdim;
|
|
std::tie(saved_g_value, saved_g_bdim) = unwrapTensorAtLevel(saved_g, cur_level);
|
|
Tensor saved_norms_value;
|
|
optional<int64_t> saved_norms_bdim;
|
|
std::tie(saved_norms_value, saved_norms_bdim) = unwrapTensorAtLevel(saved_norms, cur_level);
|
|
auto results = batch_rule(grad_w_value, grad_w_bdim, saved_v_value, saved_v_bdim, saved_g_value, saved_g_bdim, saved_norms_value, saved_norms_bdim, dim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _weight_norm_differentiable_backward_generated_plumbing(const at::Tensor & grad_w, const at::Tensor & saved_v, const at::Tensor & saved_g, const at::Tensor & saved_norms, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_w, cur_level) && !isBatchedAtLevel(saved_v, cur_level) && !isBatchedAtLevel(saved_g, cur_level) && !isBatchedAtLevel(saved_norms, cur_level)) {
|
|
return at::_ops::_weight_norm_differentiable_backward::call(grad_w, saved_v, saved_g, saved_norms, dim);
|
|
}
|
|
Tensor grad_w_value;
|
|
optional<int64_t> grad_w_bdim;
|
|
std::tie(grad_w_value, grad_w_bdim) = unwrapTensorAtLevel(grad_w, cur_level);
|
|
Tensor saved_v_value;
|
|
optional<int64_t> saved_v_bdim;
|
|
std::tie(saved_v_value, saved_v_bdim) = unwrapTensorAtLevel(saved_v, cur_level);
|
|
Tensor saved_g_value;
|
|
optional<int64_t> saved_g_bdim;
|
|
std::tie(saved_g_value, saved_g_bdim) = unwrapTensorAtLevel(saved_g, cur_level);
|
|
Tensor saved_norms_value;
|
|
optional<int64_t> saved_norms_bdim;
|
|
std::tie(saved_norms_value, saved_norms_bdim) = unwrapTensorAtLevel(saved_norms, cur_level);
|
|
auto results = batch_rule(grad_w_value, grad_w_bdim, saved_v_value, saved_v_bdim, saved_g_value, saved_g_bdim, saved_norms_value, saved_norms_bdim, dim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor zeros_like_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::zeros_like::call(self, dtype, layout, device, pin_memory, memory_format);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, memory_format);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _standard_gamma_grad_generated_plumbing(const at::Tensor & self, const at::Tensor & output) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(output, cur_level)) {
|
|
return at::_ops::_standard_gamma_grad::call(self, output);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor output_value;
|
|
optional<int64_t> output_bdim;
|
|
std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_value, output_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _standard_gamma_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_standard_gamma::call(self, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, generator);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _dirichlet_grad_generated_plumbing(const at::Tensor & x, const at::Tensor & alpha, const at::Tensor & total) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(alpha, cur_level) && !isBatchedAtLevel(total, cur_level)) {
|
|
return at::_ops::_dirichlet_grad::call(x, alpha, total);
|
|
}
|
|
Tensor x_value;
|
|
optional<int64_t> x_bdim;
|
|
std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
|
|
Tensor alpha_value;
|
|
optional<int64_t> alpha_bdim;
|
|
std::tie(alpha_value, alpha_bdim) = unwrapTensorAtLevel(alpha, cur_level);
|
|
Tensor total_value;
|
|
optional<int64_t> total_bdim;
|
|
std::tie(total_value, total_bdim) = unwrapTensorAtLevel(total, cur_level);
|
|
auto results = batch_rule(x_value, x_bdim, alpha_value, alpha_bdim, total_value, total_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sample_dirichlet_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sample_dirichlet::call(self, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, generator);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor poisson_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::poisson::call(self, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, generator);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor binomial_generated_plumbing(const at::Tensor & count, const at::Tensor & prob, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(count, cur_level) && !isBatchedAtLevel(prob, cur_level)) {
|
|
return at::_ops::binomial::call(count, prob, generator);
|
|
}
|
|
Tensor count_value;
|
|
optional<int64_t> count_bdim;
|
|
std::tie(count_value, count_bdim) = unwrapTensorAtLevel(count, cur_level);
|
|
Tensor prob_value;
|
|
optional<int64_t> prob_bdim;
|
|
std::tie(prob_value, prob_bdim) = unwrapTensorAtLevel(prob, cur_level);
|
|
auto results = batch_rule(count_value, count_bdim, prob_value, prob_bdim, generator);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor native_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & p) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::native_norm::call(self, p);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, p);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor native_norm_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::native_norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_sum_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_sum::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_sum_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_sum_dtype::call(self, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_sum_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_sum_dim::call(self, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_sum_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, at::ScalarType dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_sum_dim_dtype::call(self, dim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_sum_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_sum_backward::call(grad, self, dim);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_csr_sum_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_csr_sum_dim_dtype::call(self, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_csr_prod_dim_dtype_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_csr_prod_dim_dtype::call(self, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_softmax_int::call(self, dim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_softmax_Dimname::call(self, dim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_softmax::call(self, dim, half_to_float);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_softmax_backward_data::call(grad_output, output, dim, self);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor output_value;
|
|
optional<int64_t> output_bdim;
|
|
std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_log_softmax_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_log_softmax_int::call(self, dim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_log_softmax_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_log_softmax_Dimname::call(self, dim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, bool half_to_float) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_log_softmax::call(self, dim, half_to_float);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, half_to_float);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_log_softmax_backward_data_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_log_softmax_backward_data::call(grad_output, output, dim, self);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor output_value;
|
|
optional<int64_t> output_bdim;
|
|
std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, dim, self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _spdiags_generated_plumbing(const at::Tensor & diagonals, const at::Tensor & offsets, at::IntArrayRef shape, c10::optional<at::Layout> layout) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(diagonals, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
|
|
return at::_ops::_spdiags::call(diagonals, offsets, shape, layout);
|
|
}
|
|
Tensor diagonals_value;
|
|
optional<int64_t> diagonals_bdim;
|
|
std::tie(diagonals_value, diagonals_bdim) = unwrapTensorAtLevel(diagonals, cur_level);
|
|
Tensor offsets_value;
|
|
optional<int64_t> offsets_bdim;
|
|
std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets, cur_level);
|
|
auto results = batch_rule(diagonals_value, diagonals_bdim, offsets_value, offsets_bdim, shape, layout);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor norm_ScalarOpt_dtype_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::ScalarType dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::norm_ScalarOpt_dtype::call(self, p, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, p, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor norm_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & p) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::norm_Scalar::call(self, p);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, p);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor norm_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::norm_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor norm_ScalarOpt_dim_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::norm_ScalarOpt_dim::call(self, p, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, p, dim, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor norm_names_ScalarOpt_dim_dtype_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::norm_names_ScalarOpt_dim_dtype::call(self, p, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, p, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor norm_names_ScalarOpt_dim_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::norm_names_ScalarOpt_dim::call(self, p, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, p, dim, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> frexp_Tensor_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::frexp_Tensor::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor frobenius_norm_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::frobenius_norm_dim::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor nuclear_norm_generated_plumbing(const at::Tensor & self, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::nuclear_norm::call(self, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor nuclear_norm_dim_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::nuclear_norm_dim::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor clone_generated_plumbing(const at::Tensor & self, c10::optional<at::MemoryFormat> memory_format) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::clone::call(self, memory_format);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, memory_format);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor positive_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::positive::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
const at::Tensor & resize_as_sparse__generated_plumbing(const at::Tensor & self, const at::Tensor & the_template) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
|
|
return at::_ops::resize_as_sparse_::call(self, the_template);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor the_template_value;
|
|
optional<int64_t> the_template_bdim;
|
|
std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level);
|
|
batch_rule(self_value, self_bdim, the_template_value, the_template_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & zero__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::zero_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sub_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::sub_Tensor::call(self, other, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & sub__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::sub__Tensor::call(self, other, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sub_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::sub_Scalar::call(self, other, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other, alpha);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & sub__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::sub__Scalar::call(self, other, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other, alpha);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor subtract_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::subtract_Tensor::call(self, other, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & subtract__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::subtract__Tensor::call(self, other, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor subtract_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::subtract_Scalar::call(self, other, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other, alpha);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & subtract__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::subtract__Scalar::call(self, other, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other, alpha);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor rsub_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::rsub_Tensor::call(self, other, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor heaviside_generated_plumbing(const at::Tensor & self, const at::Tensor & values) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::heaviside::call(self, values);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, values_value, values_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & heaviside__generated_plumbing(at::Tensor & self, const at::Tensor & values) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::heaviside_::call(self, values);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
batch_rule(self_value, self_bdim, values_value, values_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor rsub_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::rsub_Scalar::call(self, other, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other, alpha);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
|
|
return at::_ops::_sparse_addmm::call(self, mat1, mat2, beta, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor mat1_value;
|
|
optional<int64_t> mat1_bdim;
|
|
std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
|
|
Tensor mat2_value;
|
|
optional<int64_t> mat2_bdim;
|
|
std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sparse_sampled_addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
|
|
return at::_ops::sparse_sampled_addmm::call(self, mat1, mat2, beta, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor mat1_value;
|
|
optional<int64_t> mat1_bdim;
|
|
std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
|
|
Tensor mat2_value;
|
|
optional<int64_t> mat2_bdim;
|
|
std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_sparse_mm_reduce_impl::call(self, other, reduce);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, reduce);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _sparse_mm_reduce_impl_backward_generated_plumbing(const at::Tensor & self, const at::Tensor & grad_out, const at::Tensor & weight, c10::string_view reduce, const at::Tensor & arg_out, ::std::array<bool,2> output_mask) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(arg_out, cur_level)) {
|
|
return at::_ops::_sparse_mm_reduce_impl_backward::call(self, grad_out, weight, reduce, arg_out, output_mask);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor grad_out_value;
|
|
optional<int64_t> grad_out_bdim;
|
|
std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
Tensor arg_out_value;
|
|
optional<int64_t> arg_out_bdim;
|
|
std::tie(arg_out_value, arg_out_bdim) = unwrapTensorAtLevel(arg_out, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, grad_out_value, grad_out_bdim, weight_value, weight_bdim, reduce, arg_out_value, arg_out_bdim, output_mask);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor addmm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
|
|
return at::_ops::addmm::call(self, mat1, mat2, beta, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor mat1_value;
|
|
optional<int64_t> mat1_bdim;
|
|
std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
|
|
Tensor mat2_value;
|
|
optional<int64_t> mat2_bdim;
|
|
std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & addmm__generated_plumbing(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
|
|
return at::_ops::addmm_::call(self, mat1, mat2, beta, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor mat1_value;
|
|
optional<int64_t> mat1_bdim;
|
|
std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
|
|
Tensor mat2_value;
|
|
optional<int64_t> mat2_bdim;
|
|
std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
|
|
batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _addmm_activation_generated_plumbing(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, bool use_gelu) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
|
|
return at::_ops::_addmm_activation::call(self, mat1, mat2, beta, alpha, use_gelu);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor mat1_value;
|
|
optional<int64_t> mat1_bdim;
|
|
std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
|
|
Tensor mat2_value;
|
|
optional<int64_t> mat2_bdim;
|
|
std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, mat1_value, mat1_bdim, mat2_value, mat2_bdim, beta, alpha, use_gelu);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _scaled_mm_generated_plumbing(const at::Tensor & self, const at::Tensor & mat2, const c10::optional<at::Tensor> & bias, c10::optional<at::ScalarType> out_dtype, const c10::optional<at::Tensor> & scale_a, const c10::optional<at::Tensor> & scale_b, const c10::optional<at::Tensor> & scale_result, bool use_fast_accum) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mat2, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(scale_a, cur_level) && !isBatchedAtLevel(scale_b, cur_level) && !isBatchedAtLevel(scale_result, cur_level)) {
|
|
return at::_ops::_scaled_mm::call(self, mat2, bias, out_dtype, scale_a, scale_b, scale_result, use_fast_accum);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor mat2_value;
|
|
optional<int64_t> mat2_bdim;
|
|
std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
optional<Tensor> scale_a_value;
|
|
optional<int64_t> scale_a_bdim;
|
|
if (scale_a) {
|
|
std::tie(scale_a_value, scale_a_bdim) = unwrapTensorAtLevel(scale_a.value(), cur_level);
|
|
}
|
|
optional<Tensor> scale_b_value;
|
|
optional<int64_t> scale_b_bdim;
|
|
if (scale_b) {
|
|
std::tie(scale_b_value, scale_b_bdim) = unwrapTensorAtLevel(scale_b.value(), cur_level);
|
|
}
|
|
optional<Tensor> scale_result_value;
|
|
optional<int64_t> scale_result_bdim;
|
|
if (scale_result) {
|
|
std::tie(scale_result_value, scale_result_bdim) = unwrapTensorAtLevel(scale_result.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, mat2_value, mat2_bdim, bias_value, bias_bdim, out_dtype, scale_a_value, scale_a_bdim, scale_b_value, scale_b_bdim, scale_result_value, scale_result_bdim, use_fast_accum);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sparse_compressed_tensor_comp_plain_value_size_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::sparse_compressed_tensor_comp_plain_value_size::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
|
|
}
|
|
Tensor compressed_indices_value;
|
|
optional<int64_t> compressed_indices_bdim;
|
|
std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level);
|
|
Tensor plain_indices_value;
|
|
optional<int64_t> plain_indices_bdim;
|
|
std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sparse_csr_tensor_crow_col_value_size_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::sparse_csr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
|
|
}
|
|
Tensor crow_indices_value;
|
|
optional<int64_t> crow_indices_bdim;
|
|
std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
|
|
Tensor col_indices_value;
|
|
optional<int64_t> col_indices_bdim;
|
|
std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sparse_csc_tensor_ccol_row_value_size_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::sparse_csc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
|
|
}
|
|
Tensor ccol_indices_value;
|
|
optional<int64_t> ccol_indices_bdim;
|
|
std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
|
|
Tensor row_indices_value;
|
|
optional<int64_t> row_indices_bdim;
|
|
std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sparse_bsr_tensor_crow_col_value_size_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::sparse_bsr_tensor_crow_col_value_size::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
|
|
}
|
|
Tensor crow_indices_value;
|
|
optional<int64_t> crow_indices_bdim;
|
|
std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
|
|
Tensor col_indices_value;
|
|
optional<int64_t> col_indices_bdim;
|
|
std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sparse_bsc_tensor_ccol_row_value_size_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::sparse_bsc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
|
|
}
|
|
Tensor ccol_indices_value;
|
|
optional<int64_t> ccol_indices_bdim;
|
|
std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
|
|
Tensor row_indices_value;
|
|
optional<int64_t> row_indices_bdim;
|
|
std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sparse_compressed_tensor_comp_plain_value_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::sparse_compressed_tensor_comp_plain_value::call(compressed_indices, plain_indices, values, dtype, layout, device, pin_memory);
|
|
}
|
|
Tensor compressed_indices_value;
|
|
optional<int64_t> compressed_indices_bdim;
|
|
std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level);
|
|
Tensor plain_indices_value;
|
|
optional<int64_t> plain_indices_bdim;
|
|
std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sparse_csr_tensor_crow_col_value_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::sparse_csr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
|
|
}
|
|
Tensor crow_indices_value;
|
|
optional<int64_t> crow_indices_bdim;
|
|
std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
|
|
Tensor col_indices_value;
|
|
optional<int64_t> col_indices_bdim;
|
|
std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sparse_csc_tensor_ccol_row_value_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::sparse_csc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
|
|
}
|
|
Tensor ccol_indices_value;
|
|
optional<int64_t> ccol_indices_bdim;
|
|
std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
|
|
Tensor row_indices_value;
|
|
optional<int64_t> row_indices_bdim;
|
|
std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sparse_bsr_tensor_crow_col_value_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::sparse_bsr_tensor_crow_col_value::call(crow_indices, col_indices, values, dtype, layout, device, pin_memory);
|
|
}
|
|
Tensor crow_indices_value;
|
|
optional<int64_t> crow_indices_bdim;
|
|
std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
|
|
Tensor col_indices_value;
|
|
optional<int64_t> col_indices_bdim;
|
|
std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sparse_bsc_tensor_ccol_row_value_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::sparse_bsc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory);
|
|
}
|
|
Tensor ccol_indices_value;
|
|
optional<int64_t> ccol_indices_bdim;
|
|
std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
|
|
Tensor row_indices_value;
|
|
optional<int64_t> row_indices_bdim;
|
|
std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_compressed_tensor_unsafe_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_sparse_compressed_tensor_unsafe::call(compressed_indices, plain_indices, values, size, dtype, layout, device, pin_memory);
|
|
}
|
|
Tensor compressed_indices_value;
|
|
optional<int64_t> compressed_indices_bdim;
|
|
std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level);
|
|
Tensor plain_indices_value;
|
|
optional<int64_t> plain_indices_bdim;
|
|
std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_csr_tensor_unsafe_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_sparse_csr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
|
|
}
|
|
Tensor crow_indices_value;
|
|
optional<int64_t> crow_indices_bdim;
|
|
std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
|
|
Tensor col_indices_value;
|
|
optional<int64_t> col_indices_bdim;
|
|
std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_csc_tensor_unsafe_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_sparse_csc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
|
|
}
|
|
Tensor ccol_indices_value;
|
|
optional<int64_t> ccol_indices_bdim;
|
|
std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
|
|
Tensor row_indices_value;
|
|
optional<int64_t> row_indices_bdim;
|
|
std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_bsr_tensor_unsafe_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_sparse_bsr_tensor_unsafe::call(crow_indices, col_indices, values, size, dtype, layout, device, pin_memory);
|
|
}
|
|
Tensor crow_indices_value;
|
|
optional<int64_t> crow_indices_bdim;
|
|
std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
|
|
Tensor col_indices_value;
|
|
optional<int64_t> col_indices_bdim;
|
|
std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_bsc_tensor_unsafe_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_sparse_bsc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
|
|
}
|
|
Tensor ccol_indices_value;
|
|
optional<int64_t> ccol_indices_bdim;
|
|
std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
|
|
Tensor row_indices_value;
|
|
optional<int64_t> row_indices_bdim;
|
|
std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sparse_coo_tensor_indices_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::sparse_coo_tensor_indices::call(indices, values, dtype, layout, device, pin_memory, is_coalesced);
|
|
}
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory, is_coalesced);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sparse_coo_tensor_indices_size_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::sparse_coo_tensor_indices_size::call(indices, values, size, dtype, layout, device, pin_memory, is_coalesced);
|
|
}
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory, is_coalesced);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_coo_tensor_unsafe_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, size, dtype, layout, device, pin_memory, is_coalesced);
|
|
}
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, dtype, layout, device, pin_memory, is_coalesced);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _validate_sparse_coo_tensor_args_generated_plumbing(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<bool> is_coalesced) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_validate_sparse_coo_tensor_args::call(indices, values, size, is_coalesced);
|
|
}
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
batch_rule(indices_value, indices_bdim, values_value, values_bdim, size, is_coalesced);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _validate_sparse_compressed_tensor_args_generated_plumbing(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(compressed_indices, cur_level) && !isBatchedAtLevel(plain_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_validate_sparse_compressed_tensor_args::call(compressed_indices, plain_indices, values, size, layout);
|
|
}
|
|
Tensor compressed_indices_value;
|
|
optional<int64_t> compressed_indices_bdim;
|
|
std::tie(compressed_indices_value, compressed_indices_bdim) = unwrapTensorAtLevel(compressed_indices, cur_level);
|
|
Tensor plain_indices_value;
|
|
optional<int64_t> plain_indices_bdim;
|
|
std::tie(plain_indices_value, plain_indices_bdim) = unwrapTensorAtLevel(plain_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
batch_rule(compressed_indices_value, compressed_indices_bdim, plain_indices_value, plain_indices_bdim, values_value, values_bdim, size, layout);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _validate_sparse_csr_tensor_args_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_validate_sparse_csr_tensor_args::call(crow_indices, col_indices, values, size);
|
|
}
|
|
Tensor crow_indices_value;
|
|
optional<int64_t> crow_indices_bdim;
|
|
std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
|
|
Tensor col_indices_value;
|
|
optional<int64_t> col_indices_bdim;
|
|
std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _validate_sparse_csc_tensor_args_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_validate_sparse_csc_tensor_args::call(ccol_indices, row_indices, values, size);
|
|
}
|
|
Tensor ccol_indices_value;
|
|
optional<int64_t> ccol_indices_bdim;
|
|
std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
|
|
Tensor row_indices_value;
|
|
optional<int64_t> row_indices_bdim;
|
|
std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _validate_sparse_bsr_tensor_args_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_validate_sparse_bsr_tensor_args::call(crow_indices, col_indices, values, size);
|
|
}
|
|
Tensor crow_indices_value;
|
|
optional<int64_t> crow_indices_bdim;
|
|
std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
|
|
Tensor col_indices_value;
|
|
optional<int64_t> col_indices_bdim;
|
|
std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, values_value, values_bdim, size);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _validate_sparse_bsc_tensor_args_generated_plumbing(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(ccol_indices, cur_level) && !isBatchedAtLevel(row_indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_validate_sparse_bsc_tensor_args::call(ccol_indices, row_indices, values, size);
|
|
}
|
|
Tensor ccol_indices_value;
|
|
optional<int64_t> ccol_indices_bdim;
|
|
std::tie(ccol_indices_value, ccol_indices_bdim) = unwrapTensorAtLevel(ccol_indices, cur_level);
|
|
Tensor row_indices_value;
|
|
optional<int64_t> row_indices_bdim;
|
|
std::tie(row_indices_value, row_indices_bdim) = unwrapTensorAtLevel(row_indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
batch_rule(ccol_indices_value, ccol_indices_bdim, row_indices_value, row_indices_bdim, values_value, values_bdim, size);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_coo_tensor_with_dims_and_tensors_generated_plumbing(int64_t sparse_dim, int64_t dense_dim, c10::SymIntArrayRef size, const at::Tensor & indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<bool> is_coalesced) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_sparse_coo_tensor_with_dims_and_tensors::call(sparse_dim, dense_dim, size, indices, values, dtype, layout, device, pin_memory, is_coalesced);
|
|
}
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(sparse_dim, dense_dim, size, indices_value, indices_bdim, values_value, values_bdim, dtype, layout, device, pin_memory, is_coalesced);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
const at::Tensor & sparse_resize__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::sparse_resize_::call(self, size, sparse_dim, dense_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
const at::Tensor & sparse_resize_and_clear__generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::sparse_resize_and_clear_::call(self, size, sparse_dim, dense_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sparse_mask_generated_plumbing(const at::Tensor & self, const at::Tensor & mask) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
|
|
return at::_ops::sparse_mask::call(self, mask);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor mask_value;
|
|
optional<int64_t> mask_bdim;
|
|
std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_mask_projection_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, bool accumulate_matches) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
|
|
return at::_ops::_sparse_mask_projection::call(self, mask, accumulate_matches);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor mask_value;
|
|
optional<int64_t> mask_bdim;
|
|
std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, accumulate_matches);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _to_cpu_generated_plumbing(at::TensorList tensors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::_to_cpu::call(tensors);
|
|
}
|
|
|
|
auto results = batch_rule(tensors);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor to_dense_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<bool> masked_grad) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::to_dense::call(self, dtype, masked_grad);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dtype, masked_grad);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _to_dense_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<bool> masked_grad) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_to_dense::call(self, dtype, masked_grad);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dtype, masked_grad);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor to_dense_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, c10::optional<bool> masked_grad) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level)) {
|
|
return at::_ops::to_dense_backward::call(grad, input, masked_grad);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, masked_grad);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor coalesce_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::coalesce::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _coalesce_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_coalesce::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _indices_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_indices::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _values_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_values::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & _coalesced__generated_plumbing(at::Tensor & self, bool coalesced) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_coalesced_::call(self, coalesced);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, coalesced);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor indices_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::indices::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor values_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::values::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor crow_indices_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::crow_indices::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor col_indices_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::col_indices::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor ccol_indices_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::ccol_indices::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor row_indices_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::row_indices::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor hspmm_generated_plumbing(const at::Tensor & mat1, const at::Tensor & mat2) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(mat1, cur_level) && !isBatchedAtLevel(mat2, cur_level)) {
|
|
return at::_ops::hspmm::call(mat1, mat2);
|
|
}
|
|
Tensor mat1_value;
|
|
optional<int64_t> mat1_bdim;
|
|
std::tie(mat1_value, mat1_bdim) = unwrapTensorAtLevel(mat1, cur_level);
|
|
Tensor mat2_value;
|
|
optional<int64_t> mat2_bdim;
|
|
std::tie(mat2_value, mat2_bdim) = unwrapTensorAtLevel(mat2, cur_level);
|
|
auto results = batch_rule(mat1_value, mat1_bdim, mat2_value, mat2_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & copy_sparse_to_sparse__generated_plumbing(at::Tensor & self, const at::Tensor & src, bool non_blocking) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
|
|
return at::_ops::copy_sparse_to_sparse_::call(self, src, non_blocking);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor src_value;
|
|
optional<int64_t> src_bdim;
|
|
std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
|
|
batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> unbind_int_generated_plumbing(const at::Tensor & self, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::unbind_int::call(self, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> unbind_Dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::unbind_Dimname::call(self, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor to_sparse_sparse_dim_generated_plumbing(const at::Tensor & self, int64_t sparse_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::to_sparse_sparse_dim::call(self, sparse_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, sparse_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _to_sparse_sparse_dim_generated_plumbing(const at::Tensor & self, int64_t sparse_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_to_sparse_sparse_dim::call(self, sparse_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, sparse_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor to_sparse_generated_plumbing(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::to_sparse::call(self, layout, blocksize, dense_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, layout, blocksize, dense_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _to_sparse_generated_plumbing(const at::Tensor & self, c10::optional<at::Layout> layout, at::OptionalIntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_to_sparse::call(self, layout, blocksize, dense_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, layout, blocksize, dense_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor to_sparse_csr_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::to_sparse_csr::call(self, dense_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dense_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _to_sparse_csr_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_to_sparse_csr::call(self, dense_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dense_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor to_sparse_csc_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::to_sparse_csc::call(self, dense_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dense_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _to_sparse_csc_generated_plumbing(const at::Tensor & self, c10::optional<int64_t> dense_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_to_sparse_csc::call(self, dense_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dense_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor to_sparse_bsr_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::to_sparse_bsr::call(self, blocksize, dense_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _to_sparse_bsr_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_to_sparse_bsr::call(self, blocksize, dense_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor to_sparse_bsc_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::to_sparse_bsc::call(self, blocksize, dense_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _to_sparse_bsc_generated_plumbing(const at::Tensor & self, at::IntArrayRef blocksize, c10::optional<int64_t> dense_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_to_sparse_bsc::call(self, blocksize, dense_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, blocksize, dense_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _to_sparse_semi_structured_generated_plumbing(const at::Tensor & dense) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(dense, cur_level)) {
|
|
return at::_ops::_to_sparse_semi_structured::call(dense);
|
|
}
|
|
Tensor dense_value;
|
|
optional<int64_t> dense_bdim;
|
|
std::tie(dense_value, dense_bdim) = unwrapTensorAtLevel(dense, cur_level);
|
|
auto results = batch_rule(dense_value, dense_bdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor to_mkldnn_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::to_mkldnn::call(self, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor mkldnn_reorder_conv2d_weight_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::OptionalSymIntArrayRef input_size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::mkldnn_reorder_conv2d_weight::call(self, padding, stride, dilation, groups, input_size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, padding, stride, dilation, groups, input_size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor mkldnn_reorder_conv3d_weight_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::mkldnn_reorder_conv3d_weight::call(self, padding, stride, dilation, groups);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, padding, stride, dilation, groups);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor to_mkldnn_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level)) {
|
|
return at::_ops::to_mkldnn_backward::call(grad, input);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor quantize_per_tensor_dynamic_generated_plumbing(const at::Tensor & self, at::ScalarType dtype, bool reduce_range) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::quantize_per_tensor_dynamic::call(self, dtype, reduce_range);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dtype, reduce_range);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor quantize_per_tensor_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, at::ScalarType dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::quantize_per_tensor::call(self, scale, zero_point, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, scale, zero_point, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor quantize_per_tensor_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, at::ScalarType dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
|
|
return at::_ops::quantize_per_tensor_tensor_qparams::call(self, scale, zero_point, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor scale_value;
|
|
optional<int64_t> scale_bdim;
|
|
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
|
|
Tensor zero_point_value;
|
|
optional<int64_t> zero_point_bdim;
|
|
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> quantize_per_tensor_tensors_generated_plumbing(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
|
|
return at::_ops::quantize_per_tensor_tensors::call(tensors, scales, zero_points, dtype);
|
|
}
|
|
Tensor scales_value;
|
|
optional<int64_t> scales_bdim;
|
|
std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
|
|
Tensor zero_points_value;
|
|
optional<int64_t> zero_points_bdim;
|
|
std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level);
|
|
auto results = batch_rule(tensors, scales_value, scales_bdim, zero_points_value, zero_points_bdim, dtype);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor quantize_per_channel_generated_plumbing(const at::Tensor & self, const at::Tensor & scales, const at::Tensor & zero_points, int64_t axis, at::ScalarType dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level)) {
|
|
return at::_ops::quantize_per_channel::call(self, scales, zero_points, axis, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor scales_value;
|
|
optional<int64_t> scales_bdim;
|
|
std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
|
|
Tensor zero_points_value;
|
|
optional<int64_t> zero_points_bdim;
|
|
std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, scales_value, scales_bdim, zero_points_value, zero_points_bdim, axis, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor dequantize_self_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::dequantize_self::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> dequantize_tensors_generated_plumbing(at::TensorList tensors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::dequantize_tensors::call(tensors);
|
|
}
|
|
|
|
auto results = batch_rule(tensors);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor q_per_channel_scales_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::q_per_channel_scales::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor q_per_channel_zero_points_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::q_per_channel_zero_points::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor int_repr_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::int_repr::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _make_per_tensor_quantized_tensor_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_make_per_tensor_quantized_tensor::call(self, scale, zero_point);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, scale, zero_point);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _make_per_channel_quantized_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
|
|
return at::_ops::_make_per_channel_quantized_tensor::call(self, scale, zero_point, axis);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor scale_value;
|
|
optional<int64_t> scale_bdim;
|
|
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
|
|
Tensor zero_point_value;
|
|
optional<int64_t> zero_point_bdim;
|
|
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fake_quantize_per_tensor_affine_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fake_quantize_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, scale, zero_point, quant_min, quant_max);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fake_quantize_per_tensor_affine_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
|
|
return at::_ops::fake_quantize_per_tensor_affine_tensor_qparams::call(self, scale, zero_point, quant_min, quant_max);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor scale_value;
|
|
optional<int64_t> scale_bdim;
|
|
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
|
|
Tensor zero_point_value;
|
|
optional<int64_t> zero_point_bdim;
|
|
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask_generated_plumbing(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fake_quantize_per_tensor_affine_cachemask::call(self, scale, zero_point, quant_min, quant_max);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, scale, zero_point, quant_min, quant_max);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _fake_quantize_per_tensor_affine_cachemask_tensor_qparams_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, const at::Tensor & fake_quant_enabled, int64_t quant_min, int64_t quant_max) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level) && !isBatchedAtLevel(fake_quant_enabled, cur_level)) {
|
|
return at::_ops::_fake_quantize_per_tensor_affine_cachemask_tensor_qparams::call(self, scale, zero_point, fake_quant_enabled, quant_min, quant_max);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor scale_value;
|
|
optional<int64_t> scale_bdim;
|
|
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
|
|
Tensor zero_point_value;
|
|
optional<int64_t> zero_point_bdim;
|
|
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
|
|
Tensor fake_quant_enabled_value;
|
|
optional<int64_t> fake_quant_enabled_bdim;
|
|
std::tie(fake_quant_enabled_value, fake_quant_enabled_bdim) = unwrapTensorAtLevel(fake_quant_enabled, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, fake_quant_enabled_value, fake_quant_enabled_bdim, quant_min, quant_max);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fake_quantize_per_tensor_affine_cachemask_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & mask) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
|
|
return at::_ops::fake_quantize_per_tensor_affine_cachemask_backward::call(grad, mask);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor mask_value;
|
|
optional<int64_t> mask_bdim;
|
|
std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, mask_value, mask_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _fake_quantize_learnable_per_tensor_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
|
|
return at::_ops::_fake_quantize_learnable_per_tensor_affine::call(self, scale, zero_point, quant_min, quant_max, grad_factor);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor scale_value;
|
|
optional<int64_t> scale_bdim;
|
|
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
|
|
Tensor zero_point_value;
|
|
optional<int64_t> zero_point_bdim;
|
|
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max, grad_factor);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_tensor_affine_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t quant_min, int64_t quant_max, double grad_factor) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
|
|
return at::_ops::_fake_quantize_learnable_per_tensor_affine_backward::call(grad, self, scale, zero_point, quant_min, quant_max, grad_factor);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor scale_value;
|
|
optional<int64_t> scale_bdim;
|
|
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
|
|
Tensor zero_point_value;
|
|
optional<int64_t> zero_point_bdim;
|
|
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, quant_min, quant_max, grad_factor);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fake_quantize_per_channel_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
|
|
return at::_ops::fake_quantize_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor scale_value;
|
|
optional<int64_t> scale_bdim;
|
|
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
|
|
Tensor zero_point_value;
|
|
optional<int64_t> zero_point_bdim;
|
|
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_channel_affine_cachemask_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
|
|
return at::_ops::fake_quantize_per_channel_affine_cachemask::call(self, scale, zero_point, axis, quant_min, quant_max);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor scale_value;
|
|
optional<int64_t> scale_bdim;
|
|
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
|
|
Tensor zero_point_value;
|
|
optional<int64_t> zero_point_bdim;
|
|
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fake_quantize_per_channel_affine_cachemask_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & mask) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
|
|
return at::_ops::fake_quantize_per_channel_affine_cachemask_backward::call(grad, mask);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor mask_value;
|
|
optional<int64_t> mask_bdim;
|
|
std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, mask_value, mask_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _fake_quantize_learnable_per_channel_affine_generated_plumbing(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
|
|
return at::_ops::_fake_quantize_learnable_per_channel_affine::call(self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor scale_value;
|
|
optional<int64_t> scale_bdim;
|
|
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
|
|
Tensor zero_point_value;
|
|
optional<int64_t> zero_point_bdim;
|
|
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max, grad_factor);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _fake_quantize_learnable_per_channel_affine_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
|
|
return at::_ops::_fake_quantize_learnable_per_channel_affine_backward::call(grad, self, scale, zero_point, axis, quant_min, quant_max, grad_factor);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor scale_value;
|
|
optional<int64_t> scale_bdim;
|
|
std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
|
|
Tensor zero_point_value;
|
|
optional<int64_t> zero_point_bdim;
|
|
std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, axis, quant_min, quant_max, grad_factor);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _saturate_weight_to_fp16_generated_plumbing(const at::Tensor & weight) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::_saturate_weight_to_fp16::call(weight);
|
|
}
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
auto results = batch_rule(weight_value, weight_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> choose_qparams_optimized_generated_plumbing(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level)) {
|
|
return at::_ops::choose_qparams_optimized::call(input, numel, n_bins, ratio, bit_width);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, numel, n_bins, ratio, bit_width);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _autocast_to_reduced_precision_generated_plumbing(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled, at::ScalarType cuda_dtype, at::ScalarType cpu_dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_autocast_to_reduced_precision::call(self, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, cuda_enabled, cpu_enabled, cuda_dtype, cpu_dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _autocast_to_full_precision_generated_plumbing(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_autocast_to_full_precision::call(self, cuda_enabled, cpu_enabled);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, cuda_enabled, cpu_enabled);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _to_copy_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, c10::optional<at::MemoryFormat> memory_format) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_to_copy::call(self, dtype, layout, device, pin_memory, non_blocking, memory_format);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, non_blocking, memory_format);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor to_dtype_layout_generated_plumbing(const at::Tensor & self, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::to_dtype_layout::call(self, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dtype, layout, device, pin_memory, non_blocking, copy, memory_format);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor to_device_generated_plumbing(const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::to_device::call(self, device, dtype, non_blocking, copy, memory_format);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, device, dtype, non_blocking, copy, memory_format);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor to_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::to_dtype::call(self, dtype, non_blocking, copy, memory_format);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dtype, non_blocking, copy, memory_format);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor to_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other, bool non_blocking, bool copy, c10::optional<at::MemoryFormat> memory_format) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::to_other::call(self, other, non_blocking, copy, memory_format);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, non_blocking, copy, memory_format);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> meshgrid_generated_plumbing(at::TensorList tensors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::meshgrid::call(tensors);
|
|
}
|
|
|
|
auto results = batch_rule(tensors);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> meshgrid_indexing_generated_plumbing(at::TensorList tensors, c10::string_view indexing) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::meshgrid_indexing::call(tensors, indexing);
|
|
}
|
|
|
|
auto results = batch_rule(tensors, indexing);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cartesian_prod_generated_plumbing(at::TensorList tensors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::cartesian_prod::call(tensors);
|
|
}
|
|
|
|
auto results = batch_rule(tensors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor combinations_generated_plumbing(const at::Tensor & self, int64_t r, bool with_replacement) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::combinations::call(self, r, with_replacement);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, r, with_replacement);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _lstm_mps_generated_plumbing(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
|
|
return at::_ops::_lstm_mps::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,::std::vector<at::Tensor>,::std::vector<at::Tensor>> lstm_mps_backward_generated_plumbing(const c10::optional<at::Tensor> & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_y, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(z_state, cur_level) && !isBatchedAtLevel(cell_state_fwd, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(layersOutputs, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
|
|
return at::_ops::lstm_mps_backward::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
|
|
}
|
|
Tensor z_state_value;
|
|
optional<int64_t> z_state_bdim;
|
|
std::tie(z_state_value, z_state_bdim) = unwrapTensorAtLevel(z_state, cur_level);
|
|
Tensor cell_state_fwd_value;
|
|
optional<int64_t> cell_state_fwd_bdim;
|
|
std::tie(cell_state_fwd_value, cell_state_fwd_bdim) = unwrapTensorAtLevel(cell_state_fwd, cur_level);
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor layersOutputs_value;
|
|
optional<int64_t> layersOutputs_bdim;
|
|
std::tie(layersOutputs_value, layersOutputs_bdim) = unwrapTensorAtLevel(layersOutputs, cur_level);
|
|
optional<Tensor> grad_y_value;
|
|
optional<int64_t> grad_y_bdim;
|
|
if (grad_y) {
|
|
std::tie(grad_y_value, grad_y_bdim) = unwrapTensorAtLevel(grad_y.value(), cur_level);
|
|
}
|
|
optional<Tensor> grad_hy_value;
|
|
optional<int64_t> grad_hy_bdim;
|
|
if (grad_hy) {
|
|
std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
|
|
}
|
|
optional<Tensor> grad_cy_value;
|
|
optional<int64_t> grad_cy_bdim;
|
|
if (grad_cy) {
|
|
std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(grad_y_value, grad_y_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, z_state_value, z_state_bdim, cell_state_fwd_value, cell_state_fwd_bdim, input_value, input_bdim, layersOutputs_value, layersOutputs_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_generated_plumbing(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
|
|
return at::_ops::_thnn_fused_lstm_cell::call(input_gates, hidden_gates, cx, input_bias, hidden_bias);
|
|
}
|
|
Tensor input_gates_value;
|
|
optional<int64_t> input_gates_bdim;
|
|
std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level);
|
|
Tensor hidden_gates_value;
|
|
optional<int64_t> hidden_gates_bdim;
|
|
std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level);
|
|
Tensor cx_value;
|
|
optional<int64_t> cx_bdim;
|
|
std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level);
|
|
optional<Tensor> input_bias_value;
|
|
optional<int64_t> input_bias_bdim;
|
|
if (input_bias) {
|
|
std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
|
|
}
|
|
optional<Tensor> hidden_bias_value;
|
|
optional<int64_t> hidden_bias_bdim;
|
|
if (hidden_bias) {
|
|
std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, cx_value, cx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_impl_generated_plumbing(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
|
|
return at::_ops::_thnn_fused_lstm_cell_backward_impl::call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
|
|
}
|
|
Tensor cx_value;
|
|
optional<int64_t> cx_bdim;
|
|
std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level);
|
|
Tensor cy_value;
|
|
optional<int64_t> cy_bdim;
|
|
std::tie(cy_value, cy_bdim) = unwrapTensorAtLevel(cy, cur_level);
|
|
Tensor workspace_value;
|
|
optional<int64_t> workspace_bdim;
|
|
std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level);
|
|
optional<Tensor> grad_hy_value;
|
|
optional<int64_t> grad_hy_bdim;
|
|
if (grad_hy) {
|
|
std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
|
|
}
|
|
optional<Tensor> grad_cy_value;
|
|
optional<int64_t> grad_cy_bdim;
|
|
if (grad_cy) {
|
|
std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, cx_value, cx_bdim, cy_value, cy_bdim, workspace_value, workspace_bdim, has_bias);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_lstm_cell_backward_generated_plumbing(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & cx, const at::Tensor & cy, const at::Tensor & workspace, bool has_bias) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
|
|
return at::_ops::_thnn_fused_lstm_cell_backward::call(grad_hy, grad_cy, cx, cy, workspace, has_bias);
|
|
}
|
|
Tensor cx_value;
|
|
optional<int64_t> cx_bdim;
|
|
std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level);
|
|
Tensor cy_value;
|
|
optional<int64_t> cy_bdim;
|
|
std::tie(cy_value, cy_bdim) = unwrapTensorAtLevel(cy, cur_level);
|
|
Tensor workspace_value;
|
|
optional<int64_t> workspace_bdim;
|
|
std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level);
|
|
optional<Tensor> grad_hy_value;
|
|
optional<int64_t> grad_hy_bdim;
|
|
if (grad_hy) {
|
|
std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
|
|
}
|
|
optional<Tensor> grad_cy_value;
|
|
optional<int64_t> grad_cy_bdim;
|
|
if (grad_cy) {
|
|
std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, cx_value, cx_bdim, cy_value, cy_bdim, workspace_value, workspace_bdim, has_bias);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
|
|
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_lstm_cell_backward_generated_plumbing(const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, const at::Tensor & cx, const at::Tensor & cy) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(cy, cur_level)) {
    return at::_ops::_thnn_differentiable_lstm_cell_backward::call(grad_hy, grad_cy, input_gates, hidden_gates, input_bias, hidden_bias, cx, cy);
  }
  Tensor input_gates_value;
  optional<int64_t> input_gates_bdim;
  std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level);
  Tensor hidden_gates_value;
  optional<int64_t> hidden_gates_bdim;
  std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level);
  Tensor cx_value;
  optional<int64_t> cx_bdim;
  std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx, cur_level);
  Tensor cy_value;
  optional<int64_t> cy_bdim;
  std::tie(cy_value, cy_bdim) = unwrapTensorAtLevel(cy, cur_level);
  optional<Tensor> grad_hy_value;
  optional<int64_t> grad_hy_bdim;
  if (grad_hy) {
    std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  }
  optional<Tensor> grad_cy_value;
  optional<int64_t> grad_cy_bdim;
  if (grad_cy) {
    std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  }
  optional<Tensor> input_bias_value;
  optional<int64_t> input_bias_bdim;
  if (input_bias) {
    std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
  }
  optional<Tensor> hidden_bias_value;
  optional<int64_t> hidden_bias_bdim;
  if (hidden_bias) {
    std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
  }
  auto results = batch_rule(grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim, cx_value, cx_bdim, cy_value, cy_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _thnn_fused_gru_cell_generated_plumbing(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
    return at::_ops::_thnn_fused_gru_cell::call(input_gates, hidden_gates, hx, input_bias, hidden_bias);
  }
  Tensor input_gates_value;
  optional<int64_t> input_gates_bdim;
  std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level);
  Tensor hidden_gates_value;
  optional<int64_t> hidden_gates_bdim;
  std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  optional<Tensor> input_bias_value;
  optional<int64_t> input_bias_bdim;
  if (input_bias) {
    std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
  }
  optional<Tensor> hidden_bias_value;
  optional<int64_t> hidden_bias_bdim;
  if (hidden_bias) {
    std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
  }
  auto results = batch_rule(input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, hx_value, hx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_fused_gru_cell_backward_generated_plumbing(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(workspace, cur_level)) {
    return at::_ops::_thnn_fused_gru_cell_backward::call(grad_hy, workspace, has_bias);
  }
  Tensor grad_hy_value;
  optional<int64_t> grad_hy_bdim;
  std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy, cur_level);
  Tensor workspace_value;
  optional<int64_t> workspace_bdim;
  std::tie(workspace_value, workspace_bdim) = unwrapTensorAtLevel(workspace, cur_level);
  auto results = batch_rule(grad_hy_value, grad_hy_bdim, workspace_value, workspace_bdim, has_bias);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _thnn_differentiable_gru_cell_backward_generated_plumbing(const at::Tensor & grad_hy, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & hx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(input_gates, cur_level) && !isBatchedAtLevel(hidden_gates, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(input_bias, cur_level) && !isBatchedAtLevel(hidden_bias, cur_level)) {
    return at::_ops::_thnn_differentiable_gru_cell_backward::call(grad_hy, input_gates, hidden_gates, hx, input_bias, hidden_bias);
  }
  Tensor grad_hy_value;
  optional<int64_t> grad_hy_bdim;
  std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy, cur_level);
  Tensor input_gates_value;
  optional<int64_t> input_gates_bdim;
  std::tie(input_gates_value, input_gates_bdim) = unwrapTensorAtLevel(input_gates, cur_level);
  Tensor hidden_gates_value;
  optional<int64_t> hidden_gates_bdim;
  std::tie(hidden_gates_value, hidden_gates_bdim) = unwrapTensorAtLevel(hidden_gates, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  optional<Tensor> input_bias_value;
  optional<int64_t> input_bias_bdim;
  if (input_bias) {
    std::tie(input_bias_value, input_bias_bdim) = unwrapTensorAtLevel(input_bias.value(), cur_level);
  }
  optional<Tensor> hidden_bias_value;
  optional<int64_t> hidden_bias_bdim;
  if (hidden_bias) {
    std::tie(hidden_bias_value, hidden_bias_bdim) = unwrapTensorAtLevel(hidden_bias.value(), cur_level);
  }
  auto results = batch_rule(grad_hy_value, grad_hy_bdim, input_gates_value, input_gates_bdim, hidden_gates_value, hidden_gates_bdim, hx_value, hx_bdim, input_bias_value, input_bias_bdim, hidden_bias_value, hidden_bias_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_input_generated_plumbing(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
    return at::_ops::lstm_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  auto results = batch_rule(input_value, input_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lstm_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
    return at::_ops::lstm_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
  }
  Tensor data_value;
  optional<int64_t> data_bdim;
  std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
  Tensor batch_sizes_value;
  optional<int64_t> batch_sizes_bdim;
  std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
  auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> gru_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
    return at::_ops::gru_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> gru_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
    return at::_ops::gru_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
  }
  Tensor data_value;
  optional<int64_t> data_bdim;
  std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
  Tensor batch_sizes_value;
  optional<int64_t> batch_sizes_bdim;
  std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> rnn_tanh_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
    return at::_ops::rnn_tanh_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> rnn_tanh_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
    return at::_ops::rnn_tanh_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
  }
  Tensor data_value;
  optional<int64_t> data_bdim;
  std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
  Tensor batch_sizes_value;
  optional<int64_t> batch_sizes_bdim;
  std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> rnn_relu_input_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
    return at::_ops::rnn_relu_input::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional, batch_first);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> rnn_relu_data_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, const at::Tensor & hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level)) {
    return at::_ops::rnn_relu_data::call(data, batch_sizes, hx, params, has_biases, num_layers, dropout, train, bidirectional);
  }
  Tensor data_value;
  optional<int64_t> data_bdim;
  std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
  Tensor batch_sizes_value;
  optional<int64_t> batch_sizes_bdim;
  std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, hx_value, hx_bdim, params, has_biases, num_layers, dropout, train, bidirectional);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> lstm_cell_generated_plumbing(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
    return at::_ops::lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor w_ih_value;
  optional<int64_t> w_ih_bdim;
  std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
  Tensor w_hh_value;
  optional<int64_t> w_hh_bdim;
  std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
  optional<Tensor> b_ih_value;
  optional<int64_t> b_ih_bdim;
  if (b_ih) {
    std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
  }
  optional<Tensor> b_hh_value;
  optional<int64_t> b_hh_bdim;
  if (b_hh) {
    std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, hx, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor gru_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
    return at::_ops::gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  Tensor w_ih_value;
  optional<int64_t> w_ih_bdim;
  std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
  Tensor w_hh_value;
  optional<int64_t> w_hh_bdim;
  std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
  optional<Tensor> b_ih_value;
  optional<int64_t> b_ih_bdim;
  if (b_ih) {
    std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
  }
  optional<Tensor> b_hh_value;
  optional<int64_t> b_hh_bdim;
  if (b_hh) {
    std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rnn_tanh_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
    return at::_ops::rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  Tensor w_ih_value;
  optional<int64_t> w_ih_bdim;
  std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
  Tensor w_hh_value;
  optional<int64_t> w_hh_bdim;
  std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
  optional<Tensor> b_ih_value;
  optional<int64_t> b_ih_bdim;
  if (b_ih) {
    std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
  }
  optional<Tensor> b_hh_value;
  optional<int64_t> b_hh_bdim;
  if (b_hh) {
    std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor rnn_relu_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih, const c10::optional<at::Tensor> & b_hh) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level)) {
    return at::_ops::rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  Tensor w_ih_value;
  optional<int64_t> w_ih_bdim;
  std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
  Tensor w_hh_value;
  optional<int64_t> w_hh_bdim;
  std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
  optional<Tensor> b_ih_value;
  optional<int64_t> b_ih_bdim;
  if (b_ih) {
    std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih.value(), cur_level);
  }
  optional<Tensor> b_hh_value;
  optional<int64_t> b_hh_bdim;
  if (b_hh) {
    std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> quantized_lstm_cell_generated_plumbing(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
    return at::_ops::quantized_lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor w_ih_value;
  optional<int64_t> w_ih_bdim;
  std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
  Tensor w_hh_value;
  optional<int64_t> w_hh_bdim;
  std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
  Tensor b_ih_value;
  optional<int64_t> b_ih_bdim;
  std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level);
  Tensor b_hh_value;
  optional<int64_t> b_hh_bdim;
  std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level);
  Tensor packed_ih_value;
  optional<int64_t> packed_ih_bdim;
  std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level);
  Tensor packed_hh_value;
  optional<int64_t> packed_hh_bdim;
  std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level);
  Tensor col_offsets_ih_value;
  optional<int64_t> col_offsets_ih_bdim;
  std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level);
  Tensor col_offsets_hh_value;
  optional<int64_t> col_offsets_hh_bdim;
  std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level);
  auto results = batch_rule(input_value, input_bdim, hx, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantized_gru_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
    return at::_ops::quantized_gru_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  Tensor w_ih_value;
  optional<int64_t> w_ih_bdim;
  std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
  Tensor w_hh_value;
  optional<int64_t> w_hh_bdim;
  std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
  Tensor b_ih_value;
  optional<int64_t> b_ih_bdim;
  std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level);
  Tensor b_hh_value;
  optional<int64_t> b_hh_bdim;
  std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level);
  Tensor packed_ih_value;
  optional<int64_t> packed_ih_bdim;
  std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level);
  Tensor packed_hh_value;
  optional<int64_t> packed_hh_bdim;
  std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level);
  Tensor col_offsets_ih_value;
  optional<int64_t> col_offsets_ih_bdim;
  std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level);
  Tensor col_offsets_hh_value;
  optional<int64_t> col_offsets_hh_bdim;
  std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level);
  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantized_rnn_relu_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
    return at::_ops::quantized_rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  Tensor w_ih_value;
  optional<int64_t> w_ih_bdim;
  std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
  Tensor w_hh_value;
  optional<int64_t> w_hh_bdim;
  std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
  Tensor b_ih_value;
  optional<int64_t> b_ih_bdim;
  std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level);
  Tensor b_hh_value;
  optional<int64_t> b_hh_bdim;
  std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level);
  Tensor packed_ih_value;
  optional<int64_t> packed_ih_bdim;
  std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level);
  Tensor packed_hh_value;
  optional<int64_t> packed_hh_bdim;
  std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level);
  Tensor col_offsets_ih_value;
  optional<int64_t> col_offsets_ih_bdim;
  std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level);
  Tensor col_offsets_hh_value;
  optional<int64_t> col_offsets_hh_bdim;
  std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level);
  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor quantized_rnn_tanh_cell_generated_plumbing(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(w_ih, cur_level) && !isBatchedAtLevel(w_hh, cur_level) && !isBatchedAtLevel(b_ih, cur_level) && !isBatchedAtLevel(b_hh, cur_level) && !isBatchedAtLevel(packed_ih, cur_level) && !isBatchedAtLevel(packed_hh, cur_level) && !isBatchedAtLevel(col_offsets_ih, cur_level) && !isBatchedAtLevel(col_offsets_hh, cur_level)) {
    return at::_ops::quantized_rnn_tanh_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh, packed_ih, packed_hh, col_offsets_ih, col_offsets_hh, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  Tensor w_ih_value;
  optional<int64_t> w_ih_bdim;
  std::tie(w_ih_value, w_ih_bdim) = unwrapTensorAtLevel(w_ih, cur_level);
  Tensor w_hh_value;
  optional<int64_t> w_hh_bdim;
  std::tie(w_hh_value, w_hh_bdim) = unwrapTensorAtLevel(w_hh, cur_level);
  Tensor b_ih_value;
  optional<int64_t> b_ih_bdim;
  std::tie(b_ih_value, b_ih_bdim) = unwrapTensorAtLevel(b_ih, cur_level);
  Tensor b_hh_value;
  optional<int64_t> b_hh_bdim;
  std::tie(b_hh_value, b_hh_bdim) = unwrapTensorAtLevel(b_hh, cur_level);
  Tensor packed_ih_value;
  optional<int64_t> packed_ih_bdim;
  std::tie(packed_ih_value, packed_ih_bdim) = unwrapTensorAtLevel(packed_ih, cur_level);
  Tensor packed_hh_value;
  optional<int64_t> packed_hh_bdim;
  std::tie(packed_hh_value, packed_hh_bdim) = unwrapTensorAtLevel(packed_hh, cur_level);
  Tensor col_offsets_ih_value;
  optional<int64_t> col_offsets_ih_bdim;
  std::tie(col_offsets_ih_value, col_offsets_ih_bdim) = unwrapTensorAtLevel(col_offsets_ih, cur_level);
  Tensor col_offsets_hh_value;
  optional<int64_t> col_offsets_hh_bdim;
  std::tie(col_offsets_hh_value, col_offsets_hh_bdim) = unwrapTensorAtLevel(col_offsets_hh, cur_level);
  auto results = batch_rule(input_value, input_bdim, hx_value, hx_bdim, w_ih_value, w_ih_bdim, w_hh_value, w_hh_bdim, b_ih_value, b_ih_bdim, b_hh_value, b_hh_bdim, packed_ih_value, packed_ih_bdim, packed_hh_value, packed_hh_bdim, col_offsets_ih_value, col_offsets_ih_bdim, col_offsets_hh_value, col_offsets_hh_bdim, scale_ih, scale_hh, zero_point_ih, zero_point_hh);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _pack_padded_sequence_generated_plumbing(const at::Tensor & input, const at::Tensor & lengths, bool batch_first) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(lengths, cur_level)) {
    return at::_ops::_pack_padded_sequence::call(input, lengths, batch_first);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor lengths_value;
  optional<int64_t> lengths_bdim;
  std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths, cur_level);
  auto results = batch_rule(input_value, input_bdim, lengths_value, lengths_bdim, batch_first);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _pack_padded_sequence_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef input_size, const at::Tensor & batch_sizes, bool batch_first) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level)) {
    return at::_ops::_pack_padded_sequence_backward::call(grad, input_size, batch_sizes, batch_first);
  }
  Tensor grad_value;
  optional<int64_t> grad_bdim;
  std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
  Tensor batch_sizes_value;
  optional<int64_t> batch_sizes_bdim;
  std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
  auto results = batch_rule(grad_value, grad_bdim, input_size, batch_sizes_value, batch_sizes_bdim, batch_first);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> _pad_packed_sequence_generated_plumbing(const at::Tensor & data, const at::Tensor & batch_sizes, bool batch_first, const at::Scalar & padding_value, int64_t total_length) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(batch_sizes, cur_level)) {
    return at::_ops::_pad_packed_sequence::call(data, batch_sizes, batch_first, padding_value, total_length);
  }
  Tensor data_value;
  optional<int64_t> data_bdim;
  std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
  Tensor batch_sizes_value;
  optional<int64_t> batch_sizes_bdim;
  std::tie(batch_sizes_value, batch_sizes_bdim) = unwrapTensorAtLevel(batch_sizes, cur_level);
  auto results = batch_rule(data_value, data_bdim, batch_sizes_value, batch_sizes_bdim, batch_first, padding_value, total_length);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor lift_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::lift::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor lift_fresh_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::lift_fresh::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor lift_fresh_copy_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::lift_fresh_copy::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
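// Note: the in-place variants below (names ending in '_') use the "gen_vmap_inplace_plumbing"
// escape check, call batch_rule for its side effect on the unwrapped self tensor, and return
// the original `self` reference instead of wrapping a new batched tensor with makeBatched.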
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & masked_fill__Scalar_generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::masked_fill__Scalar::call(self, mask, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  batch_rule(self_value, self_bdim, mask_value, mask_bdim, value);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor masked_fill_Scalar_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::masked_fill_Scalar::call(self, mask, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & masked_fill__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(value, cur_level)) {
    return at::_ops::masked_fill__Tensor::call(self, mask, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  Tensor value_value;
  optional<int64_t> value_bdim;
  std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  batch_rule(self_value, self_bdim, mask_value, mask_bdim, value_value, value_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor masked_fill_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(value, cur_level)) {
    return at::_ops::masked_fill_Tensor::call(self, mask, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  Tensor value_value;
  optional<int64_t> value_bdim;
  std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, value_value, value_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & masked_scatter__generated_plumbing(at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::masked_scatter_::call(self, mask, source);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  batch_rule(self_value, self_bdim, mask_value, mask_bdim, source_value, source_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor masked_scatter_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, const at::Tensor & source) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::masked_scatter::call(self, mask, source);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, source_value, source_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor masked_scatter_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & mask, c10::SymIntArrayRef sizes) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
|
|
return at::_ops::masked_scatter_backward::call(grad_output, mask, sizes);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor mask_value;
|
|
optional<int64_t> mask_bdim;
|
|
std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, mask_value, mask_bdim, sizes);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _masked_softmax_generated_plumbing(const at::Tensor & self, const at::Tensor & mask, c10::optional<int64_t> dim, c10::optional<int64_t> mask_type) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::_masked_softmax::call(self, mask, dim, mask_type);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim, dim, mask_type);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _masked_softmax_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & mask, c10::optional<int64_t> dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::_masked_softmax_backward::call(grad_output, output, mask, dim);
  }
  Tensor grad_output_value;
  optional<int64_t> grad_output_bdim;
  std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
  Tensor output_value;
  optional<int64_t> output_bdim;
  std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  Tensor mask_value;
  optional<int64_t> mask_bdim;
  std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
  auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim, mask_value, mask_bdim, dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor view_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::view::call(self, size);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor view_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::view_dtype::call(self, dtype);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dtype);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & put__generated_plumbing(at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::put_::call(self, index, source, accumulate);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  batch_rule(self_value, self_bdim, index_value, index_bdim, source_value, source_bdim, accumulate);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor put_generated_plumbing(const at::Tensor & self, const at::Tensor & index, const at::Tensor & source, bool accumulate) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::put::call(self, index, source, accumulate);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  auto results = batch_rule(self_value, self_bdim, index_value, index_bdim, source_value, source_bdim, accumulate);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & index_add__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::index_add_::call(self, dim, index, source, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_add_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::index_add::call(self, dim, index, source, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_add_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::index_add_dimname::call(self, dim, index, source, alpha);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, alpha);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & index_reduce__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::index_reduce_::call(self, dim, index, source, reduce, include_self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, reduce, include_self);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, c10::string_view reduce, bool include_self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::index_reduce::call(self, dim, index, source, reduce, include_self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, source_value, source_bdim, reduce, include_self);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & index_fill__int_Scalar_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::index_fill__int_Scalar::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_fill_int_Scalar_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::index_fill_int_Scalar::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & index_fill__int_Tensor_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
    return at::_ops::index_fill__int_Tensor::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor value_value;
  optional<int64_t> value_bdim;
  std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_fill_int_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
    return at::_ops::index_fill_int_Tensor::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor value_value;
  optional<int64_t> value_bdim;
  std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & index_fill__Dimname_Scalar_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::index_fill__Dimname_Scalar::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & index_fill__Dimname_Tensor_generated_plumbing(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
    return at::_ops::index_fill__Dimname_Tensor::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor value_value;
  optional<int64_t> value_bdim;
  std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_fill_Dimname_Scalar_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::index_fill_Dimname_Scalar::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor index_fill_Dimname_Tensor_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(value, cur_level)) {
    return at::_ops::index_fill_Dimname_Tensor::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor value_value;
  optional<int64_t> value_bdim;
  std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value_value, value_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_src_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_src::call(self, dim, index, src);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter__src_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter__src::call(self, dim, index, src);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_value_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::scatter_value::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter__value_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::scatter__value::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_reduce::call(self, dim, index, src, reduce);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter__reduce_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter__reduce::call(self, dim, index, src, reduce);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_value_reduce_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::scatter_value_reduce::call(self, dim, index, value, reduce);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value, reduce);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter__value_reduce_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, c10::string_view reduce) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::scatter__value_reduce::call(self, dim, index, value, reduce);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value, reduce);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_dimname_src_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_dimname_src::call(self, dim, index, src);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_dimname_value_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Scalar & value) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
    return at::_ops::scatter_dimname_value::call(self, dim, index, value);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, value);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_add_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_add::call(self, dim, index, src);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter_add__generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_add_::call(self, dim, index, src);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_add_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & src) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_add_dimname::call(self, dim, index, src);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor scatter_reduce_two_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_reduce_two::call(self, dim, index, src, reduce, include_self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce, include_self);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & scatter_reduce__two_generated_plumbing(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & src, c10::string_view reduce, bool include_self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::scatter_reduce__two::call(self, dim, index, src, reduce, include_self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor index_value;
  optional<int64_t> index_bdim;
  std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  batch_rule(self_value, self_bdim, dim, index_value, index_bdim, src_value, src_bdim, reduce, include_self);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & eq__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::eq__Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & eq__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::eq__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_and_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_and_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_and_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_and_Scalar_Tensor::call(self, other);
  }
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_and_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_and_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bitwise_and__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_and__Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bitwise_and__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_and__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor __and___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__and___Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor __and___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__and___Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & __iand___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__iand___Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & __iand___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__iand___Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_or_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_or_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_or_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_or_Scalar_Tensor::call(self, other);
  }
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_or_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_or_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bitwise_or__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_or__Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bitwise_or__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_or__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor __or___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__or___Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor __or___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__or___Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & __ior___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::__ior___Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & __ior___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::__ior___Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_xor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_xor_Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, other);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_xor_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_xor_Scalar_Tensor::call(self, other);
  }
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bitwise_xor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_xor_Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bitwise_xor__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::bitwise_xor__Scalar::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, other);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & bitwise_xor__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
    return at::_ops::bitwise_xor__Tensor::call(self, other);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor other_value;
  optional<int64_t> other_bdim;
  std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
  batch_rule(self_value, self_bdim, other_value, other_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor __xor___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::__xor___Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor __xor___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::__xor___Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & __ixor___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::__ixor___Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & __ixor___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::__ixor___Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor __lshift___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::__lshift___Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor __lshift___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::__lshift___Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & __ilshift___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::__ilshift___Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & __ilshift___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::__ilshift___Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor bitwise_left_shift_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::bitwise_left_shift_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & bitwise_left_shift__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::bitwise_left_shift__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor bitwise_left_shift_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::bitwise_left_shift_Tensor_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & bitwise_left_shift__Tensor_Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::bitwise_left_shift__Tensor_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor bitwise_left_shift_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::bitwise_left_shift_Scalar_Tensor::call(self, other);
|
|
}
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor __rshift___Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::__rshift___Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor __rshift___Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::__rshift___Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & __irshift___Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::__irshift___Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & __irshift___Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::__irshift___Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor bitwise_right_shift_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::bitwise_right_shift_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & bitwise_right_shift__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::bitwise_right_shift__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor bitwise_right_shift_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::bitwise_right_shift_Tensor_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & bitwise_right_shift__Tensor_Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::bitwise_right_shift__Tensor_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor bitwise_right_shift_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::bitwise_right_shift_Scalar_Tensor::call(self, other);
|
|
}
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & tril__generated_plumbing(at::Tensor & self, int64_t diagonal) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::tril_::call(self, diagonal);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, diagonal);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & triu__generated_plumbing(at::Tensor & self, int64_t diagonal) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::triu_::call(self, diagonal);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, diagonal);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & digamma__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::digamma_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & lerp__Scalar_generated_plumbing(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level)) {
|
|
return at::_ops::lerp__Scalar::call(self, end, weight);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor end_value;
|
|
optional<int64_t> end_bdim;
|
|
std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
|
|
batch_rule(self_value, self_bdim, end_value, end_bdim, weight);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & lerp__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::lerp__Tensor::call(self, end, weight);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor end_value;
|
|
optional<int64_t> end_bdim;
|
|
std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
batch_rule(self_value, self_bdim, end_value, end_bdim, weight_value, weight_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & addbmm__generated_plumbing(at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
|
|
return at::_ops::addbmm_::call(self, batch1, batch2, beta, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor batch1_value;
|
|
optional<int64_t> batch1_bdim;
|
|
std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level);
|
|
Tensor batch2_value;
|
|
optional<int64_t> batch2_bdim;
|
|
std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level);
|
|
batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor addbmm_generated_plumbing(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(batch1, cur_level) && !isBatchedAtLevel(batch2, cur_level)) {
|
|
return at::_ops::addbmm::call(self, batch1, batch2, beta, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor batch1_value;
|
|
optional<int64_t> batch1_bdim;
|
|
std::tie(batch1_value, batch1_bdim) = unwrapTensorAtLevel(batch1, cur_level);
|
|
Tensor batch2_value;
|
|
optional<int64_t> batch2_bdim;
|
|
std::tie(batch2_value, batch2_bdim) = unwrapTensorAtLevel(batch2, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, batch1_value, batch1_bdim, batch2_value, batch2_bdim, beta, alpha);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & random__from_generated_plumbing(at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::random__from::call(self, from, to, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, from, to, generator);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & random__to_generated_plumbing(at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::random__to::call(self, to, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, to, generator);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & random__generated_plumbing(at::Tensor & self, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::random_::call(self, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, generator);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & uniform__generated_plumbing(at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::uniform_::call(self, from, to, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, from, to, generator);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & cauchy__generated_plumbing(at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::cauchy_::call(self, median, sigma, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, median, sigma, generator);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & log_normal__generated_plumbing(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::log_normal_::call(self, mean, std, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, mean, std, generator);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & exponential__generated_plumbing(at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::exponential_::call(self, lambd, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, lambd, generator);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & geometric__generated_plumbing(at::Tensor & self, double p, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::geometric_::call(self, p, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, p, generator);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor diag_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::diag::call(self, diagonal);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, diagonal);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cross_generated_plumbing(const at::Tensor & self, const at::Tensor & other, c10::optional<int64_t> dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::cross::call(self, other, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor triu_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::triu::call(self, diagonal);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, diagonal);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor tril_generated_plumbing(const at::Tensor & self, int64_t diagonal) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::tril::call(self, diagonal);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, diagonal);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor trace_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::trace::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor trace_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef sizes) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level)) {
|
|
return at::_ops::trace_backward::call(grad, sizes);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, sizes);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor ne_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::ne_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor ne_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::ne_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & ne__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::ne__Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & ne__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::ne__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor not_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::not_equal_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor not_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::not_equal_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & not_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::not_equal__Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & not_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::not_equal__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor eq_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::eq_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor eq_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::eq_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor ge_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::ge_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor ge_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::ge_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & ge__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::ge__Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & ge__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::ge__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor greater_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::greater_equal_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor greater_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::greater_equal_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & greater_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::greater_equal__Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & greater_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::greater_equal__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor le_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::le_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor le_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::le_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & le__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::le__Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & le__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::le__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor less_equal_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::less_equal_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor less_equal_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::less_equal_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & less_equal__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::less_equal__Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & less_equal__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::less_equal__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor gt_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::gt_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor gt_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::gt_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & gt__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::gt__Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & gt__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::gt__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor greater_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::greater_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor greater_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::greater_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & greater__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::greater__Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & greater__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::greater__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor lt_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::lt_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor lt_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::lt_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & lt__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::lt__Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & lt__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::lt__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor less_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::less_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor less_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::less_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & less__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::less__Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & less__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::less__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor take_generated_plumbing(const at::Tensor & self, const at::Tensor & index) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
|
|
return at::_ops::take::call(self, index);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor index_value;
|
|
optional<int64_t> index_bdim;
|
|
std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, index_value, index_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor take_along_dim_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, c10::optional<int64_t> dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
|
|
return at::_ops::take_along_dim::call(self, indices, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor index_select_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
|
|
return at::_ops::index_select::call(self, dim, index);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor index_value;
|
|
optional<int64_t> index_bdim;
|
|
std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor index_select_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
|
|
return at::_ops::index_select_dimname::call(self, dim, index);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor index_value;
|
|
optional<int64_t> index_bdim;
|
|
std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor index_select_backward_generated_plumbing(const at::Tensor & grad, c10::SymIntArrayRef self_sizes, int64_t dim, const at::Tensor & index) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(index, cur_level)) {
|
|
return at::_ops::index_select_backward::call(grad, self_sizes, dim, index);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor index_value;
|
|
optional<int64_t> index_bdim;
|
|
std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, self_sizes, dim, index_value, index_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor masked_select_generated_plumbing(const at::Tensor & self, const at::Tensor & mask) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
|
|
return at::_ops::masked_select::call(self, mask);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor mask_value;
|
|
optional<int64_t> mask_bdim;
|
|
std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, mask_value, mask_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor masked_select_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & input, const at::Tensor & mask) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
|
|
return at::_ops::masked_select_backward::call(grad, input, mask);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor mask_value;
|
|
optional<int64_t> mask_bdim;
|
|
std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, input_value, input_bdim, mask_value, mask_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor nonzero_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::nonzero::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor nonzero_static_generated_plumbing(const at::Tensor & self, int64_t size, int64_t fill_value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::nonzero_static::call(self, size, fill_value);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, size, fill_value);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> nonzero_numpy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::nonzero_numpy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor argwhere_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::argwhere::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor gather_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
|
|
return at::_ops::gather::call(self, dim, index, sparse_grad);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor index_value;
|
|
optional<int64_t> index_bdim;
|
|
std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor gather_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & self, int64_t dim, const at::Tensor & index, bool sparse_grad) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
|
|
return at::_ops::gather_backward::call(grad, self, dim, index, sparse_grad);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor index_value;
|
|
optional<int64_t> index_bdim;
|
|
std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
|
|
auto results = batch_rule(grad_value, grad_bdim, self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor gather_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, bool sparse_grad) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level)) {
|
|
return at::_ops::gather_dimname::call(self, dim, index, sparse_grad);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor index_value;
|
|
optional<int64_t> index_bdim;
|
|
std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, sparse_grad);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _gather_sparse_backward_generated_plumbing(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & grad) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(index, cur_level) && !isBatchedAtLevel(grad, cur_level)) {
|
|
return at::_ops::_gather_sparse_backward::call(self, dim, index, grad);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor index_value;
|
|
optional<int64_t> index_bdim;
|
|
std::tie(index_value, index_bdim) = unwrapTensorAtLevel(index, cur_level);
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, index_value, index_bdim, grad_value, grad_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor addcmul_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
|
|
return at::_ops::addcmul::call(self, tensor1, tensor2, value);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor tensor1_value;
|
|
optional<int64_t> tensor1_bdim;
|
|
std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level);
|
|
Tensor tensor2_value;
|
|
optional<int64_t> tensor2_bdim;
|
|
std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & addcmul__generated_plumbing(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
|
|
return at::_ops::addcmul_::call(self, tensor1, tensor2, value);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor tensor1_value;
|
|
optional<int64_t> tensor1_bdim;
|
|
std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level);
|
|
Tensor tensor2_value;
|
|
optional<int64_t> tensor2_bdim;
|
|
std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level);
|
|
batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor addcdiv_generated_plumbing(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
|
|
return at::_ops::addcdiv::call(self, tensor1, tensor2, value);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor tensor1_value;
|
|
optional<int64_t> tensor1_bdim;
|
|
std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level);
|
|
Tensor tensor2_value;
|
|
optional<int64_t> tensor2_bdim;
|
|
std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & addcdiv__generated_plumbing(at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
|
|
return at::_ops::addcdiv_::call(self, tensor1, tensor2, value);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor tensor1_value;
|
|
optional<int64_t> tensor1_bdim;
|
|
std::tie(tensor1_value, tensor1_bdim) = unwrapTensorAtLevel(tensor1, cur_level);
|
|
Tensor tensor2_value;
|
|
optional<int64_t> tensor2_bdim;
|
|
std::tie(tensor2_value, tensor2_bdim) = unwrapTensorAtLevel(tensor2, cur_level);
|
|
batch_rule(self_value, self_bdim, tensor1_value, tensor1_bdim, tensor2_value, tensor2_bdim, value);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cross_entropy_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, double label_smoothing) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::cross_entropy_loss::call(self, target, weight, reduction, ignore_index, label_smoothing);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, label_smoothing);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> triangular_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & A, bool upper, bool transpose, bool unitriangular) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::triangular_solve::call(self, A, upper, transpose, unitriangular);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, A_value, A_bdim, upper, transpose, unitriangular);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _linalg_check_errors_generated_plumbing(const at::Tensor & info, c10::string_view api_name, bool is_matrix) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(info, cur_level)) {
|
|
return at::_ops::_linalg_check_errors::call(info, api_name, is_matrix);
|
|
}
|
|
Tensor info_value;
|
|
optional<int64_t> info_bdim;
|
|
std::tie(info_value, info_bdim) = unwrapTensorAtLevel(info, cur_level);
|
|
batch_rule(info_value, info_bdim, api_name, is_matrix);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_solve_triangular_generated_plumbing(const at::Tensor & self, const at::Tensor & B, bool upper, bool left, bool unitriangular) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(B, cur_level)) {
|
|
return at::_ops::linalg_solve_triangular::call(self, B, upper, left, unitriangular);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor B_value;
|
|
optional<int64_t> B_bdim;
|
|
std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, B_value, B_bdim, upper, left, unitriangular);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_vander_generated_plumbing(const at::Tensor & x, c10::optional<c10::SymInt> N) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(x, cur_level)) {
|
|
return at::_ops::linalg_vander::call(x, N);
|
|
}
|
|
Tensor x_value;
|
|
optional<int64_t> x_bdim;
|
|
std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
|
|
auto results = batch_rule(x_value, x_bdim, N);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> svd_generated_plumbing(const at::Tensor & self, bool some, bool compute_uv) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::svd::call(self, some, compute_uv);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, some, compute_uv);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor swapaxes_generated_plumbing(const at::Tensor & self, int64_t axis0, int64_t axis1) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::swapaxes::call(self, axis0, axis1);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, axis0, axis1);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor swapdims_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::swapdims::call(self, dim0, dim1);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim0, dim1);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cholesky_generated_plumbing(const at::Tensor & self, bool upper) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::cholesky::call(self, upper);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, upper);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cholesky_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & input2, bool upper) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level)) {
|
|
return at::_ops::cholesky_solve::call(self, input2, upper);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor input2_value;
|
|
optional<int64_t> input2_bdim;
|
|
std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim, upper);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _cholesky_solve_helper_generated_plumbing(const at::Tensor & self, const at::Tensor & A, bool upper) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::_cholesky_solve_helper::call(self, A, upper);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, A_value, A_bdim, upper);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor cholesky_inverse_generated_plumbing(const at::Tensor & self, bool upper) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::cholesky_inverse::call(self, upper);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, upper);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> qr_generated_plumbing(const at::Tensor & self, bool some) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::qr::call(self, some);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, some);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> geqrf_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::geqrf::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor orgqr_generated_plumbing(const at::Tensor & self, const at::Tensor & input2) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level)) {
|
|
return at::_ops::orgqr::call(self, input2);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor input2_value;
|
|
optional<int64_t> input2_bdim;
|
|
std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor ormqr_generated_plumbing(const at::Tensor & self, const at::Tensor & input2, const at::Tensor & input3, bool left, bool transpose) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(input2, cur_level) && !isBatchedAtLevel(input3, cur_level)) {
|
|
return at::_ops::ormqr::call(self, input2, input3, left, transpose);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor input2_value;
|
|
optional<int64_t> input2_bdim;
|
|
std::tie(input2_value, input2_bdim) = unwrapTensorAtLevel(input2, cur_level);
|
|
Tensor input3_value;
|
|
optional<int64_t> input3_bdim;
|
|
std::tie(input3_value, input3_bdim) = unwrapTensorAtLevel(input3, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, input2_value, input2_bdim, input3_value, input3_bdim, left, transpose);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _lu_with_info_generated_plumbing(const at::Tensor & self, bool pivot, bool check_errors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_lu_with_info::call(self, pivot, check_errors);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, pivot, check_errors);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor lu_solve_generated_plumbing(const at::Tensor & self, const at::Tensor & LU_data, const at::Tensor & LU_pivots) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(LU_data, cur_level) && !isBatchedAtLevel(LU_pivots, cur_level)) {
|
|
return at::_ops::lu_solve::call(self, LU_data, LU_pivots);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor LU_data_value;
|
|
optional<int64_t> LU_data_bdim;
|
|
std::tie(LU_data_value, LU_data_bdim) = unwrapTensorAtLevel(LU_data, cur_level);
|
|
Tensor LU_pivots_value;
|
|
optional<int64_t> LU_pivots_bdim;
|
|
std::tie(LU_pivots_value, LU_pivots_bdim) = unwrapTensorAtLevel(LU_pivots, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, LU_data_value, LU_data_bdim, LU_pivots_value, LU_pivots_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> lu_unpack_generated_plumbing(const at::Tensor & LU_data, const at::Tensor & LU_pivots, bool unpack_data, bool unpack_pivots) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(LU_data, cur_level) && !isBatchedAtLevel(LU_pivots, cur_level)) {
|
|
return at::_ops::lu_unpack::call(LU_data, LU_pivots, unpack_data, unpack_pivots);
|
|
}
|
|
Tensor LU_data_value;
|
|
optional<int64_t> LU_data_bdim;
|
|
std::tie(LU_data_value, LU_data_bdim) = unwrapTensorAtLevel(LU_data, cur_level);
|
|
Tensor LU_pivots_value;
|
|
optional<int64_t> LU_pivots_bdim;
|
|
std::tie(LU_pivots_value, LU_pivots_bdim) = unwrapTensorAtLevel(LU_pivots, cur_level);
|
|
auto results = batch_rule(LU_data_value, LU_data_bdim, LU_pivots_value, LU_pivots_bdim, unpack_data, unpack_pivots);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor multinomial_generated_plumbing(const at::Tensor & self, int64_t num_samples, bool replacement, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::multinomial::call(self, num_samples, replacement, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, num_samples, replacement, generator);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & lgamma__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::lgamma_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor lgamma_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::lgamma::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor digamma_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::digamma::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor polygamma_generated_plumbing(int64_t n, const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::polygamma::call(n, self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(n, self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & polygamma__generated_plumbing(at::Tensor & self, int64_t n) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::polygamma_::call(self, n);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, n);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor erfinv_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::erfinv::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & erfinv__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::erfinv_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor i0_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::i0::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & i0__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::i0_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sign_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::sign::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & sign__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::sign_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor signbit_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::signbit::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor dist_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & p) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::dist::call(self, other, p);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, p);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & atan2__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::atan2_::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor atan2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::atan2::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor arctan2_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::arctan2::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & arctan2__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::arctan2_::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor lerp_Scalar_generated_plumbing(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level)) {
|
|
return at::_ops::lerp_Scalar::call(self, end, weight);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor end_value;
|
|
optional<int64_t> end_bdim;
|
|
std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, end_value, end_bdim, weight);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor lerp_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(end, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::lerp_Tensor::call(self, end, weight);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor end_value;
|
|
optional<int64_t> end_bdim;
|
|
std::tie(end_value, end_bdim) = unwrapTensorAtLevel(end, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, end_value, end_bdim, weight_value, weight_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor histc_generated_plumbing(const at::Tensor & self, int64_t bins, const at::Scalar & min, const at::Scalar & max) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::histc::call(self, bins, min, max);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, bins, min, max);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> histogram_bins_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & bins, const c10::optional<at::Tensor> & weight, bool density) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::histogram_bins_tensor::call(self, bins, weight, density);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor bins_value;
|
|
optional<int64_t> bins_bdim;
|
|
std::tie(bins_value, bins_bdim) = unwrapTensorAtLevel(bins, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, bins_value, bins_bdim, weight_value, weight_bdim, density);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> histogram_bin_ct_generated_plumbing(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::histogram_bin_ct::call(self, bins, range, weight, density);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _histogramdd_bin_edges_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::_histogramdd_bin_edges::call(self, bins, range, weight, density);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _histogramdd_from_bin_cts_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::_histogramdd_from_bin_cts::call(self, bins, range, weight, density);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _histogramdd_from_bin_tensors_generated_plumbing(const at::Tensor & self, at::TensorList bins, const c10::optional<at::Tensor> & weight, bool density) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::_histogramdd_from_bin_tensors::call(self, bins, weight, density);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, bins, weight_value, weight_bdim, density);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::histogramdd::call(self, bins, range, weight, density);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_int_bins_generated_plumbing(const at::Tensor & self, int64_t bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::histogramdd_int_bins::call(self, bins, range, weight, density);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,::std::vector<at::Tensor>> histogramdd_TensorList_bins_generated_plumbing(const at::Tensor & self, at::TensorList bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(bins, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::histogramdd_TensorList_bins::call(self, bins, range, weight, density);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fmod_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fmod_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & fmod__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fmod__Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fmod_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::fmod_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & fmod__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::fmod__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor hypot_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::hypot::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & hypot__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::hypot_::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor igamma_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::igamma::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & igamma__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::igamma_::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor igammac_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::igammac::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & igammac__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::igammac_::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor nextafter_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::nextafter::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & nextafter__generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::nextafter_::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor remainder_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::remainder_Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & remainder__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::remainder__Scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, other);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor remainder_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::remainder_Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & remainder__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::remainder__Tensor::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor remainder_Scalar_Tensor_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::remainder_Scalar_Tensor::call(self, other);
|
|
}
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor min_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::min::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fmin_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::fmin::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor max_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::max::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fmax_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::fmax::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor maximum_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::maximum::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor max_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::max_other::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor minimum_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::minimum::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor min_other_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::min_other::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor quantile_generated_plumbing(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(q, cur_level)) {
|
|
return at::_ops::quantile::call(self, q, dim, keepdim, interpolation);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor q_value;
|
|
optional<int64_t> q_bdim;
|
|
std::tie(q_value, q_bdim) = unwrapTensorAtLevel(q, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, q_value, q_bdim, dim, keepdim, interpolation);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor quantile_scalar_generated_plumbing(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::quantile_scalar::call(self, q, dim, keepdim, interpolation);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, q, dim, keepdim, interpolation);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor nanquantile_generated_plumbing(const at::Tensor & self, const at::Tensor & q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(q, cur_level)) {
|
|
return at::_ops::nanquantile::call(self, q, dim, keepdim, interpolation);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor q_value;
|
|
optional<int64_t> q_bdim;
|
|
std::tie(q_value, q_bdim) = unwrapTensorAtLevel(q, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, q_value, q_bdim, dim, keepdim, interpolation);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor nanquantile_scalar_generated_plumbing(const at::Tensor & self, double q, c10::optional<int64_t> dim, bool keepdim, c10::string_view interpolation) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::nanquantile_scalar::call(self, q, dim, keepdim, interpolation);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, q, dim, keepdim, interpolation);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> sort_generated_plumbing(const at::Tensor & self, int64_t dim, bool descending) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::sort::call(self, dim, descending);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, descending);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> sort_stable_generated_plumbing(const at::Tensor & self, c10::optional<bool> stable, int64_t dim, bool descending) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::sort_stable::call(self, stable, dim, descending);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> sort_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool descending) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::sort_dimname::call(self, dim, descending);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, descending);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> sort_dimname_stable_generated_plumbing(const at::Tensor & self, c10::optional<bool> stable, at::Dimname dim, bool descending) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::sort_dimname_stable::call(self, stable, dim, descending);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor msort_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::msort::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor argsort_generated_plumbing(const at::Tensor & self, int64_t dim, bool descending) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::argsort::call(self, dim, descending);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, descending);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor argsort_stable_generated_plumbing(const at::Tensor & self, bool stable, int64_t dim, bool descending) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::argsort_stable::call(self, stable, dim, descending);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, stable, dim, descending);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor argsort_dimname_generated_plumbing(const at::Tensor & self, at::Dimname dim, bool descending) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::argsort_dimname::call(self, dim, descending);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, descending);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> topk_generated_plumbing(const at::Tensor & self, c10::SymInt k, int64_t dim, bool largest, bool sorted) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::topk::call(self, k, dim, largest, sorted);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, k, dim, largest, sorted);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor all_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::all::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor any_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::any::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor renorm_generated_plumbing(const at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::renorm::call(self, p, dim, maxnorm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p, dim, maxnorm);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & renorm__generated_plumbing(at::Tensor & self, const at::Scalar & p, int64_t dim, const at::Scalar & maxnorm) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::renorm_::call(self, p, dim, maxnorm);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, p, dim, maxnorm);
  return self;
}
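// In-place plumbing (wrappers with a trailing underscore, checked with
// "gen_vmap_inplace_plumbing") unwraps `self` the same way, calls the batch
// rule for its side effect on the unwrapped value, and returns the original
// `self` reference without re-wrapping, as in renorm_ above.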
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor unfold_generated_plumbing(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::unfold::call(self, dimension, size, step);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, dimension, size, step);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor unfold_backward_generated_plumbing(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_in, cur_level)) {
    return at::_ops::unfold_backward::call(grad_in, input_sizes, dim, size, step);
  }
  Tensor grad_in_value;
  optional<int64_t> grad_in_bdim;
  std::tie(grad_in_value, grad_in_bdim) = unwrapTensorAtLevel(grad_in, cur_level);
  auto results = batch_rule(grad_in_value, grad_in_bdim, input_sizes, dim, size, step);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor pow_Tensor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
    return at::_ops::pow_Tensor_Tensor::call(self, exponent);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor exponent_value;
  optional<int64_t> exponent_bdim;
  std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
  auto results = batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor pow_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(exponent, cur_level)) {
    return at::_ops::pow_Scalar::call(self, exponent);
  }
  Tensor exponent_value;
  optional<int64_t> exponent_bdim;
  std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
  auto results = batch_rule(self, exponent_value, exponent_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor pow_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::pow_Tensor_Scalar::call(self, exponent);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, exponent);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & pow__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::pow__Scalar::call(self, exponent);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, exponent);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & pow__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
    return at::_ops::pow__Tensor::call(self, exponent);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor exponent_value;
  optional<int64_t> exponent_bdim;
  std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
  batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor float_power_Tensor_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
    return at::_ops::float_power_Tensor_Tensor::call(self, exponent);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor exponent_value;
  optional<int64_t> exponent_bdim;
  std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
  auto results = batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor float_power_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(exponent, cur_level)) {
    return at::_ops::float_power_Scalar::call(self, exponent);
  }
  Tensor exponent_value;
  optional<int64_t> exponent_bdim;
  std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
  auto results = batch_rule(self, exponent_value, exponent_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor float_power_Tensor_Scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::float_power_Tensor_Scalar::call(self, exponent);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, exponent);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & float_power__Scalar_generated_plumbing(at::Tensor & self, const at::Scalar & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::float_power__Scalar::call(self, exponent);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, exponent);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & float_power__Tensor_generated_plumbing(at::Tensor & self, const at::Tensor & exponent) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
    return at::_ops::float_power__Tensor::call(self, exponent);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor exponent_value;
  optional<int64_t> exponent_bdim;
  std::tie(exponent_value, exponent_bdim) = unwrapTensorAtLevel(exponent, cur_level);
  batch_rule(self_value, self_bdim, exponent_value, exponent_bdim);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & normal__generated_plumbing(at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::normal_::call(self, mean, std, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, mean, std, generator);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor normal_functional_generated_plumbing(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::normal_functional::call(self, mean, std, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, mean, std, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor normal_Tensor_float_generated_plumbing(const at::Tensor & mean, double std, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(mean, cur_level)) {
    return at::_ops::normal_Tensor_float::call(mean, std, generator);
  }
  Tensor mean_value;
  optional<int64_t> mean_bdim;
  std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
  auto results = batch_rule(mean_value, mean_bdim, std, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor normal_float_Tensor_generated_plumbing(double mean, const at::Tensor & std, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(std, cur_level)) {
    return at::_ops::normal_float_Tensor::call(mean, std, generator);
  }
  Tensor std_value;
  optional<int64_t> std_bdim;
  std::tie(std_value, std_bdim) = unwrapTensorAtLevel(std, cur_level);
  auto results = batch_rule(mean, std_value, std_bdim, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor normal_Tensor_Tensor_generated_plumbing(const at::Tensor & mean, const at::Tensor & std, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(mean, cur_level) && !isBatchedAtLevel(std, cur_level)) {
    return at::_ops::normal_Tensor_Tensor::call(mean, std, generator);
  }
  Tensor mean_value;
  optional<int64_t> mean_bdim;
  std::tie(mean_value, mean_bdim) = unwrapTensorAtLevel(mean, cur_level);
  Tensor std_value;
  optional<int64_t> std_bdim;
  std::tie(std_value, std_bdim) = unwrapTensorAtLevel(std, cur_level);
  auto results = batch_rule(mean_value, mean_bdim, std_value, std_bdim, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor alias_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::alias::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
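// The _foreach_* plumbing that follows passes at::TensorList arguments through
// to the batch rule unchanged (only plain Tensor arguments such as `other` or
// `scalars` are unwrapped), re-wraps list results with makeBatchedVector, and
// returns void from the variants generated with "gen_vmap_plumbing_no_returns".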
template <typename batch_rule_t, batch_rule_t batch_rule>
void _amp_foreach_non_finite_check_and_unscale__generated_plumbing(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level)) {
    return at::_ops::_amp_foreach_non_finite_check_and_unscale_::call(self, found_inf, inv_scale);
  }
  Tensor found_inf_value;
  optional<int64_t> found_inf_bdim;
  std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level);
  Tensor inv_scale_value;
  optional<int64_t> inv_scale_bdim;
  std::tie(inv_scale_value, inv_scale_bdim) = unwrapTensorAtLevel(inv_scale, cur_level);
  batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::vector<at::Tensor> _foreach_add_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_foreach_add_Scalar::call(self, scalar);
  }

  auto results = batch_rule(self, scalar);
  return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_add__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_add__Scalar::call(self, scalar);
|
|
}
|
|
|
|
batch_rule(self, scalar);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_add_List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_add_List::call(self, other, alpha);
|
|
}
|
|
|
|
auto results = batch_rule(self, other, alpha);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_add__List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_add__List::call(self, other, alpha);
|
|
}
|
|
|
|
batch_rule(self, other, alpha);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_add_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_add_ScalarList::call(self, scalars);
|
|
}
|
|
|
|
auto results = batch_rule(self, scalars);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_add__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_add__ScalarList::call(self, scalars);
|
|
}
|
|
|
|
batch_rule(self, scalars);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_add_Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_add_Tensor::call(self, other, alpha);
|
|
}
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self, other_value, other_bdim, alpha);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_add__Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_add__Tensor::call(self, other, alpha);
|
|
}
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self, other_value, other_bdim, alpha);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_sub_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_sub_Scalar::call(self, scalar);
|
|
}
|
|
|
|
auto results = batch_rule(self, scalar);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_sub__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_sub__Scalar::call(self, scalar);
|
|
}
|
|
|
|
batch_rule(self, scalar);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_sub_List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_sub_List::call(self, other, alpha);
|
|
}
|
|
|
|
auto results = batch_rule(self, other, alpha);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_sub__List_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_sub__List::call(self, other, alpha);
|
|
}
|
|
|
|
batch_rule(self, other, alpha);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_sub_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_sub_ScalarList::call(self, scalars);
|
|
}
|
|
|
|
auto results = batch_rule(self, scalars);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_sub__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_sub__ScalarList::call(self, scalars);
|
|
}
|
|
|
|
batch_rule(self, scalars);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_mul_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_mul_Scalar::call(self, scalar);
|
|
}
|
|
|
|
auto results = batch_rule(self, scalar);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_mul__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_mul__Scalar::call(self, scalar);
|
|
}
|
|
|
|
batch_rule(self, scalar);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_mul_List_generated_plumbing(at::TensorList self, at::TensorList other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_mul_List::call(self, other);
|
|
}
|
|
|
|
auto results = batch_rule(self, other);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_mul__List_generated_plumbing(at::TensorList self, at::TensorList other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_mul__List::call(self, other);
|
|
}
|
|
|
|
batch_rule(self, other);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_mul_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_mul_ScalarList::call(self, scalars);
|
|
}
|
|
|
|
auto results = batch_rule(self, scalars);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_mul__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_mul__ScalarList::call(self, scalars);
|
|
}
|
|
|
|
batch_rule(self, scalars);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_mul_Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_mul_Tensor::call(self, other);
|
|
}
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self, other_value, other_bdim);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_mul__Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_mul__Tensor::call(self, other);
|
|
}
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self, other_value, other_bdim);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_div_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_div_Scalar::call(self, scalar);
|
|
}
|
|
|
|
auto results = batch_rule(self, scalar);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_div__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_div__Scalar::call(self, scalar);
|
|
}
|
|
|
|
batch_rule(self, scalar);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_div_List_generated_plumbing(at::TensorList self, at::TensorList other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_div_List::call(self, other);
|
|
}
|
|
|
|
auto results = batch_rule(self, other);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_div__List_generated_plumbing(at::TensorList self, at::TensorList other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_div__List::call(self, other);
|
|
}
|
|
|
|
batch_rule(self, other);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_div_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_div_ScalarList::call(self, scalars);
|
|
}
|
|
|
|
auto results = batch_rule(self, scalars);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_div__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_div__ScalarList::call(self, scalars);
|
|
}
|
|
|
|
batch_rule(self, scalars);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_div_Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_div_Tensor::call(self, other);
|
|
}
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self, other_value, other_bdim);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_div__Tensor_generated_plumbing(at::TensorList self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_div__Tensor::call(self, other);
|
|
}
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self, other_value, other_bdim);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_clamp_max_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_clamp_max_Scalar::call(self, scalar);
|
|
}
|
|
|
|
auto results = batch_rule(self, scalar);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_clamp_max__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_clamp_max__Scalar::call(self, scalar);
|
|
}
|
|
|
|
batch_rule(self, scalar);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_clamp_max_List_generated_plumbing(at::TensorList self, at::TensorList other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_clamp_max_List::call(self, other);
|
|
}
|
|
|
|
auto results = batch_rule(self, other);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_clamp_max__List_generated_plumbing(at::TensorList self, at::TensorList other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_clamp_max__List::call(self, other);
|
|
}
|
|
|
|
batch_rule(self, other);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_clamp_max_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_clamp_max_ScalarList::call(self, scalars);
|
|
}
|
|
|
|
auto results = batch_rule(self, scalars);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_clamp_max__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_clamp_max__ScalarList::call(self, scalars);
|
|
}
|
|
|
|
batch_rule(self, scalars);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_clamp_min_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_clamp_min_Scalar::call(self, scalar);
|
|
}
|
|
|
|
auto results = batch_rule(self, scalar);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_clamp_min__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_clamp_min__Scalar::call(self, scalar);
|
|
}
|
|
|
|
batch_rule(self, scalar);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_clamp_min_List_generated_plumbing(at::TensorList self, at::TensorList other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_clamp_min_List::call(self, other);
|
|
}
|
|
|
|
auto results = batch_rule(self, other);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_clamp_min__List_generated_plumbing(at::TensorList self, at::TensorList other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_clamp_min__List::call(self, other);
|
|
}
|
|
|
|
batch_rule(self, other);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_clamp_min_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_clamp_min_ScalarList::call(self, scalars);
|
|
}
|
|
|
|
auto results = batch_rule(self, scalars);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_clamp_min__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_clamp_min__ScalarList::call(self, scalars);
|
|
}
|
|
|
|
batch_rule(self, scalars);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_maximum_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_maximum_Scalar::call(self, scalar);
|
|
}
|
|
|
|
auto results = batch_rule(self, scalar);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_maximum__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_maximum__Scalar::call(self, scalar);
|
|
}
|
|
|
|
batch_rule(self, scalar);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_maximum_List_generated_plumbing(at::TensorList self, at::TensorList other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_maximum_List::call(self, other);
|
|
}
|
|
|
|
auto results = batch_rule(self, other);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_maximum__List_generated_plumbing(at::TensorList self, at::TensorList other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_maximum__List::call(self, other);
|
|
}
|
|
|
|
batch_rule(self, other);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_maximum_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_maximum_ScalarList::call(self, scalars);
|
|
}
|
|
|
|
auto results = batch_rule(self, scalars);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_maximum__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_maximum__ScalarList::call(self, scalars);
|
|
}
|
|
|
|
batch_rule(self, scalars);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_minimum_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_minimum_Scalar::call(self, scalar);
|
|
}
|
|
|
|
auto results = batch_rule(self, scalar);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_minimum__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & scalar) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_minimum__Scalar::call(self, scalar);
|
|
}
|
|
|
|
batch_rule(self, scalar);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_minimum_List_generated_plumbing(at::TensorList self, at::TensorList other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_minimum_List::call(self, other);
|
|
}
|
|
|
|
auto results = batch_rule(self, other);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_minimum__List_generated_plumbing(at::TensorList self, at::TensorList other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_foreach_minimum__List::call(self, other);
|
|
}
|
|
|
|
batch_rule(self, other);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_minimum_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_minimum_ScalarList::call(self, scalars);
|
|
}
|
|
|
|
auto results = batch_rule(self, scalars);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_minimum__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_minimum__ScalarList::call(self, scalars);
|
|
}
|
|
|
|
batch_rule(self, scalars);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_addcdiv_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
|
|
return at::_ops::_foreach_addcdiv_Scalar::call(self, tensor1, tensor2, value);
|
|
}
|
|
|
|
auto results = batch_rule(self, tensor1, tensor2, value);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_addcdiv_ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
|
|
return at::_ops::_foreach_addcdiv_ScalarList::call(self, tensor1, tensor2, scalars);
|
|
}
|
|
|
|
auto results = batch_rule(self, tensor1, tensor2, scalars);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_addcdiv_Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
|
|
return at::_ops::_foreach_addcdiv_Tensor::call(self, tensor1, tensor2, scalars);
|
|
}
|
|
Tensor scalars_value;
|
|
optional<int64_t> scalars_bdim;
|
|
std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
|
|
auto results = batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_addcdiv__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
|
|
return at::_ops::_foreach_addcdiv__Scalar::call(self, tensor1, tensor2, value);
|
|
}
|
|
|
|
batch_rule(self, tensor1, tensor2, value);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_addcdiv__ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
|
|
return at::_ops::_foreach_addcdiv__ScalarList::call(self, tensor1, tensor2, scalars);
|
|
}
|
|
|
|
batch_rule(self, tensor1, tensor2, scalars);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_addcdiv__Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
|
|
return at::_ops::_foreach_addcdiv__Tensor::call(self, tensor1, tensor2, scalars);
|
|
}
|
|
Tensor scalars_value;
|
|
optional<int64_t> scalars_bdim;
|
|
std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
|
|
batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_addcmul_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
|
|
return at::_ops::_foreach_addcmul_Scalar::call(self, tensor1, tensor2, value);
|
|
}
|
|
|
|
auto results = batch_rule(self, tensor1, tensor2, value);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_addcmul_ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
|
|
return at::_ops::_foreach_addcmul_ScalarList::call(self, tensor1, tensor2, scalars);
|
|
}
|
|
|
|
auto results = batch_rule(self, tensor1, tensor2, scalars);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_addcmul_Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
|
|
return at::_ops::_foreach_addcmul_Tensor::call(self, tensor1, tensor2, scalars);
|
|
}
|
|
Tensor scalars_value;
|
|
optional<int64_t> scalars_bdim;
|
|
std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
|
|
auto results = batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_addcmul__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
|
|
return at::_ops::_foreach_addcmul__Scalar::call(self, tensor1, tensor2, value);
|
|
}
|
|
|
|
batch_rule(self, tensor1, tensor2, value);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_addcmul__ScalarList_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level)) {
|
|
return at::_ops::_foreach_addcmul__ScalarList::call(self, tensor1, tensor2, scalars);
|
|
}
|
|
|
|
batch_rule(self, tensor1, tensor2, scalars);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_addcmul__Tensor_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level)) {
|
|
return at::_ops::_foreach_addcmul__Tensor::call(self, tensor1, tensor2, scalars);
|
|
}
|
|
Tensor scalars_value;
|
|
optional<int64_t> scalars_bdim;
|
|
std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
|
|
batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_abs_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_abs::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_abs__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_abs_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_acos_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_acos::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_acos__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_acos_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_asin_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_asin::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_asin__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_asin_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_atan_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_atan::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_atan__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_atan_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_ceil_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_ceil::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_ceil__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_ceil_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_cos_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_cos::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_cos__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_cos_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_cosh_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_cosh::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_cosh__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_cosh_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_erf_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_erf::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_erf__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_erf_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_erfc_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_erfc::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_erfc__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_erfc_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_exp_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_exp::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_exp__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_exp_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_expm1_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_expm1::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_expm1__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_expm1_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_floor_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_floor::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_floor__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_floor_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_frac_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_frac::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_frac__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_frac_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_lerp_List_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level)) {
|
|
return at::_ops::_foreach_lerp_List::call(self, tensors1, weights);
|
|
}
|
|
|
|
auto results = batch_rule(self, tensors1, weights);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_lerp__List_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level)) {
|
|
return at::_ops::_foreach_lerp__List::call(self, tensors1, weights);
|
|
}
|
|
|
|
batch_rule(self, tensors1, weights);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_lerp_Scalar_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level)) {
|
|
return at::_ops::_foreach_lerp_Scalar::call(self, tensors1, weight);
|
|
}
|
|
|
|
auto results = batch_rule(self, tensors1, weight);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_lerp__Scalar_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level)) {
|
|
return at::_ops::_foreach_lerp__Scalar::call(self, tensors1, weight);
|
|
}
|
|
|
|
batch_rule(self, tensors1, weight);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_lgamma_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_lgamma::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_lgamma__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_lgamma_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_log_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_log::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_log__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_log_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_log10_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_log10::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_log10__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_log10_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_log1p_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_log1p::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_log1p__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_log1p_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_log2_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_log2::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_log2__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_log2_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_neg_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_neg::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_neg__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_neg_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_norm_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & ord) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_norm_Scalar::call(self, ord);
|
|
}
|
|
|
|
auto results = batch_rule(self, ord);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_pow_List_generated_plumbing(at::TensorList self, at::TensorList exponent) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
|
|
return at::_ops::_foreach_pow_List::call(self, exponent);
|
|
}
|
|
|
|
auto results = batch_rule(self, exponent);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_pow_Scalar_generated_plumbing(at::TensorList self, const at::Scalar & exponent) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_pow_Scalar::call(self, exponent);
|
|
}
|
|
|
|
auto results = batch_rule(self, exponent);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_pow_ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_pow_ScalarList::call(self, exponent);
|
|
}
|
|
|
|
auto results = batch_rule(self, exponent);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_pow_ScalarAndTensor_generated_plumbing(const at::Scalar & self, at::TensorList exponent) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(exponent, cur_level)) {
|
|
return at::_ops::_foreach_pow_ScalarAndTensor::call(self, exponent);
|
|
}
|
|
|
|
auto results = batch_rule(self, exponent);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_pow__List_generated_plumbing(at::TensorList self, at::TensorList exponent) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level)) {
|
|
return at::_ops::_foreach_pow__List::call(self, exponent);
|
|
}
|
|
|
|
batch_rule(self, exponent);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_pow__Scalar_generated_plumbing(at::TensorList self, const at::Scalar & exponent) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_pow__Scalar::call(self, exponent);
|
|
}
|
|
|
|
batch_rule(self, exponent);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_pow__ScalarList_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> exponent) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_pow__ScalarList::call(self, exponent);
|
|
}
|
|
|
|
batch_rule(self, exponent);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_reciprocal_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_reciprocal::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_reciprocal__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_reciprocal_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_round_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_round::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_round__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_round_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_sigmoid_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_sigmoid::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_sigmoid__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_sigmoid_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_sign_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_sign::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_sign__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_sign_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_sin_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_sin::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_sin__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_sin_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_sinh_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_sinh::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_sinh__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_sinh_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_sqrt_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_sqrt::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_sqrt__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_sqrt_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_tan_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_tan::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_tan__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_tan_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_tanh_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_tanh::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_tanh__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_tanh_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_trunc_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_trunc::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_trunc__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_trunc_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_zero__generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_zero_::call(self);
|
|
}
|
|
|
|
batch_rule(self);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_copy__generated_plumbing(at::TensorList self, at::TensorList src, bool non_blocking) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
|
|
return at::_ops::_foreach_copy_::call(self, src, non_blocking);
|
|
}
|
|
|
|
batch_rule(self, src, non_blocking);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor bucketize_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & boundaries, bool out_int32, bool right) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(boundaries, cur_level)) {
|
|
return at::_ops::bucketize_Tensor::call(self, boundaries, out_int32, right);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor boundaries_value;
|
|
optional<int64_t> boundaries_bdim;
|
|
std::tie(boundaries_value, boundaries_bdim) = unwrapTensorAtLevel(boundaries, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, boundaries_value, boundaries_bdim, out_int32, right);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor bucketize_Scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & boundaries, bool out_int32, bool right) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(boundaries, cur_level)) {
|
|
return at::_ops::bucketize_Scalar::call(self, boundaries, out_int32, right);
|
|
}
|
|
Tensor boundaries_value;
|
|
optional<int64_t> boundaries_bdim;
|
|
std::tie(boundaries_value, boundaries_bdim) = unwrapTensorAtLevel(boundaries, cur_level);
|
|
auto results = batch_rule(self, boundaries_value, boundaries_bdim, out_int32, right);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor searchsorted_Tensor_generated_plumbing(const at::Tensor & sorted_sequence, const at::Tensor & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(sorted_sequence, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(sorter, cur_level)) {
|
|
return at::_ops::searchsorted_Tensor::call(sorted_sequence, self, out_int32, right, side, sorter);
|
|
}
|
|
Tensor sorted_sequence_value;
|
|
optional<int64_t> sorted_sequence_bdim;
|
|
std::tie(sorted_sequence_value, sorted_sequence_bdim) = unwrapTensorAtLevel(sorted_sequence, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
optional<Tensor> sorter_value;
|
|
optional<int64_t> sorter_bdim;
|
|
if (sorter) {
|
|
std::tie(sorter_value, sorter_bdim) = unwrapTensorAtLevel(sorter.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(sorted_sequence_value, sorted_sequence_bdim, self_value, self_bdim, out_int32, right, side, sorter_value, sorter_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor searchsorted_Scalar_generated_plumbing(const at::Tensor & sorted_sequence, const at::Scalar & self, bool out_int32, bool right, c10::optional<c10::string_view> side, const c10::optional<at::Tensor> & sorter) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(sorted_sequence, cur_level) && !isBatchedAtLevel(sorter, cur_level)) {
|
|
return at::_ops::searchsorted_Scalar::call(sorted_sequence, self, out_int32, right, side, sorter);
|
|
}
|
|
Tensor sorted_sequence_value;
|
|
optional<int64_t> sorted_sequence_bdim;
|
|
std::tie(sorted_sequence_value, sorted_sequence_bdim) = unwrapTensorAtLevel(sorted_sequence, cur_level);
|
|
optional<Tensor> sorter_value;
|
|
optional<int64_t> sorter_bdim;
|
|
if (sorter) {
|
|
std::tie(sorter_value, sorter_bdim) = unwrapTensorAtLevel(sorter.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(sorted_sequence_value, sorted_sequence_bdim, self, out_int32, right, side, sorter_value, sorter_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _convert_indices_from_coo_to_csr_generated_plumbing(const at::Tensor & self, int64_t size, bool out_int32) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_convert_indices_from_coo_to_csr::call(self, size, out_int32);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, size, out_int32);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _convert_indices_from_csr_to_coo_generated_plumbing(const at::Tensor & crow_indices, const at::Tensor & col_indices, bool out_int32, bool transpose) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(crow_indices, cur_level) && !isBatchedAtLevel(col_indices, cur_level)) {
return at::_ops::_convert_indices_from_csr_to_coo::call(crow_indices, col_indices, out_int32, transpose);
}
Tensor crow_indices_value;
optional<int64_t> crow_indices_bdim;
std::tie(crow_indices_value, crow_indices_bdim) = unwrapTensorAtLevel(crow_indices, cur_level);
Tensor col_indices_value;
optional<int64_t> col_indices_bdim;
std::tie(col_indices_value, col_indices_bdim) = unwrapTensorAtLevel(col_indices, cur_level);
auto results = batch_rule(crow_indices_value, crow_indices_bdim, col_indices_value, col_indices_bdim, out_int32, transpose);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mse_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
return at::_ops::mse_loss::call(self, target, reduction);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor target_value;
optional<int64_t> target_bdim;
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor mse_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
return at::_ops::mse_loss_backward::call(grad_output, self, target, reduction);
}
Tensor grad_output_value;
optional<int64_t> grad_output_bdim;
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor target_value;
optional<int64_t> target_bdim;
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor l1_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
return at::_ops::l1_loss::call(self, target, reduction);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor target_value;
optional<int64_t> target_bdim;
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor multi_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
return at::_ops::multi_margin_loss::call(self, target, p, margin, weight, reduction);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor target_value;
optional<int64_t> target_bdim;
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
optional<Tensor> weight_value;
optional<int64_t> weight_bdim;
if (weight) {
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
}
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, p, margin, weight_value, weight_bdim, reduction);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor multi_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const c10::optional<at::Tensor> & weight, int64_t reduction) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
return at::_ops::multi_margin_loss_backward::call(grad_output, self, target, p, margin, weight, reduction);
}
Tensor grad_output_value;
optional<int64_t> grad_output_bdim;
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor target_value;
optional<int64_t> target_bdim;
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
optional<Tensor> weight_value;
optional<int64_t> weight_bdim;
if (weight) {
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
}
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, p, margin, weight_value, weight_bdim, reduction);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor multilabel_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
int64_t cur_level = maybe_layer->layerId();
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
return at::_ops::multilabel_margin_loss::call(self, target, reduction);
}
Tensor self_value;
optional<int64_t> self_bdim;
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
Tensor target_value;
optional<int64_t> target_bdim;
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor> multilabel_margin_loss_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
auto maybe_layer = maybeCurrentDynamicLayer();
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
|
|
return at::_ops::multilabel_margin_loss_forward::call(self, target, reduction);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor multilabel_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(is_target, cur_level)) {
|
|
return at::_ops::multilabel_margin_loss_backward::call(grad_output, self, target, reduction, is_target);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
Tensor is_target_value;
|
|
optional<int64_t> is_target_bdim;
|
|
std::tie(is_target_value, is_target_bdim) = unwrapTensorAtLevel(is_target, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, is_target_value, is_target_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor nll_loss_nd_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::nll_loss_nd::call(self, target, weight, reduction, ignore_index);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor nll_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::nll_loss::call(self, target, weight, reduction, ignore_index);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> nll_loss_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::nll_loss_forward::call(self, target, weight, reduction, ignore_index);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor nll_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(total_weight, cur_level)) {
|
|
return at::_ops::nll_loss_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
Tensor total_weight_value;
|
|
optional<int64_t> total_weight_bdim;
|
|
std::tie(total_weight_value, total_weight_bdim) = unwrapTensorAtLevel(total_weight, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, total_weight_value, total_weight_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor nll_loss2d_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::nll_loss2d::call(self, target, weight, reduction, ignore_index);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> nll_loss2d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::nll_loss2d_forward::call(self, target, weight, reduction, ignore_index);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor nll_loss2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, const c10::optional<at::Tensor> & weight, int64_t reduction, c10::SymInt ignore_index, const at::Tensor & total_weight) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(total_weight, cur_level)) {
|
|
return at::_ops::nll_loss2d_backward::call(grad_output, self, target, weight, reduction, ignore_index, total_weight);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
Tensor total_weight_value;
|
|
optional<int64_t> total_weight_bdim;
|
|
std::tie(total_weight_value, total_weight_bdim) = unwrapTensorAtLevel(total_weight, cur_level);
|
|
optional<Tensor> weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
if (weight) {
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, weight_value, weight_bdim, reduction, ignore_index, total_weight_value, total_weight_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor smooth_l1_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
|
|
return at::_ops::smooth_l1_loss::call(self, target, reduction, beta);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, beta);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor smooth_l1_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
|
|
return at::_ops::smooth_l1_loss_backward::call(grad_output, self, target, reduction, beta);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, beta);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor huber_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
|
|
return at::_ops::huber_loss::call(self, target, reduction, delta);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction, delta);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor huber_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
|
|
return at::_ops::huber_loss_backward::call(grad_output, self, target, reduction, delta);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction, delta);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor soft_margin_loss_generated_plumbing(const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
|
|
return at::_ops::soft_margin_loss::call(self, target, reduction);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, target_value, target_bdim, reduction);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor soft_margin_loss_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(target, cur_level)) {
|
|
return at::_ops::soft_margin_loss_backward::call(grad_output, self, target, reduction);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor target_value;
|
|
optional<int64_t> target_bdim;
|
|
std::tie(target_value, target_bdim) = unwrapTensorAtLevel(target, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, target_value, target_bdim, reduction);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor elu_generated_plumbing(const at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::elu::call(self, alpha, scale, input_scale);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, alpha, scale, input_scale);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor elu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale, bool is_result, const at::Tensor & self_or_result) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self_or_result, cur_level)) {
|
|
return at::_ops::elu_backward::call(grad_output, alpha, scale, input_scale, is_result, self_or_result);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_or_result_value;
|
|
optional<int64_t> self_or_result_bdim;
|
|
std::tie(self_or_result_value, self_or_result_bdim) = unwrapTensorAtLevel(self_or_result, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, alpha, scale, input_scale, is_result, self_or_result_value, self_or_result_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & elu__generated_plumbing(at::Tensor & self, const at::Scalar & alpha, const at::Scalar & scale, const at::Scalar & input_scale) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::elu_::call(self, alpha, scale, input_scale);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, alpha, scale, input_scale);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor glu_generated_plumbing(const at::Tensor & self, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::glu::call(self, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor glu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::glu_backward::call(grad_output, self, dim);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor glu_jvp_generated_plumbing(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(glu, cur_level) && !isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(dx, cur_level)) {
|
|
return at::_ops::glu_jvp::call(glu, x, dx, dim);
|
|
}
|
|
Tensor glu_value;
|
|
optional<int64_t> glu_bdim;
|
|
std::tie(glu_value, glu_bdim) = unwrapTensorAtLevel(glu, cur_level);
|
|
Tensor x_value;
|
|
optional<int64_t> x_bdim;
|
|
std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
|
|
Tensor dx_value;
|
|
optional<int64_t> dx_bdim;
|
|
std::tie(dx_value, dx_bdim) = unwrapTensorAtLevel(dx, cur_level);
|
|
auto results = batch_rule(glu_value, glu_bdim, x_value, x_bdim, dx_value, dx_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor glu_backward_jvp_generated_plumbing(const at::Tensor & grad_x, const at::Tensor & grad_glu, const at::Tensor & x, const at::Tensor & dgrad_glu, const at::Tensor & dx, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_x, cur_level) && !isBatchedAtLevel(grad_glu, cur_level) && !isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(dgrad_glu, cur_level) && !isBatchedAtLevel(dx, cur_level)) {
|
|
return at::_ops::glu_backward_jvp::call(grad_x, grad_glu, x, dgrad_glu, dx, dim);
|
|
}
|
|
Tensor grad_x_value;
|
|
optional<int64_t> grad_x_bdim;
|
|
std::tie(grad_x_value, grad_x_bdim) = unwrapTensorAtLevel(grad_x, cur_level);
|
|
Tensor grad_glu_value;
|
|
optional<int64_t> grad_glu_bdim;
|
|
std::tie(grad_glu_value, grad_glu_bdim) = unwrapTensorAtLevel(grad_glu, cur_level);
|
|
Tensor x_value;
|
|
optional<int64_t> x_bdim;
|
|
std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
|
|
Tensor dgrad_glu_value;
|
|
optional<int64_t> dgrad_glu_bdim;
|
|
std::tie(dgrad_glu_value, dgrad_glu_bdim) = unwrapTensorAtLevel(dgrad_glu, cur_level);
|
|
Tensor dx_value;
|
|
optional<int64_t> dx_bdim;
|
|
std::tie(dx_value, dx_bdim) = unwrapTensorAtLevel(dx, cur_level);
|
|
auto results = batch_rule(grad_x_value, grad_x_bdim, grad_glu_value, grad_glu_bdim, x_value, x_bdim, dgrad_glu_value, dgrad_glu_bdim, dx_value, dx_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor hardsigmoid_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::hardsigmoid::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & hardsigmoid__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::hardsigmoid_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor hardsigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::hardsigmoid_backward::call(grad_output, self);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor hardtanh_generated_plumbing(const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::hardtanh::call(self, min_val, max_val);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, min_val, max_val);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor hardtanh_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::hardtanh_backward::call(grad_output, self, min_val, max_val);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, min_val, max_val);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & hardtanh__generated_plumbing(at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::hardtanh_::call(self, min_val, max_val);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, min_val, max_val);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor hardswish_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::hardswish::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & hardswish__generated_plumbing(at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::hardswish_::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor hardswish_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::hardswish_backward::call(grad_output, self);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor leaky_relu_generated_plumbing(const at::Tensor & self, const at::Scalar & negative_slope) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::leaky_relu::call(self, negative_slope);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, negative_slope);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor leaky_relu_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::leaky_relu_backward::call(grad_output, self, negative_slope, self_is_result);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, negative_slope, self_is_result);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & leaky_relu__generated_plumbing(at::Tensor & self, const at::Scalar & negative_slope) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::leaky_relu_::call(self, negative_slope);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, negative_slope);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor log_sigmoid_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::log_sigmoid::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> log_sigmoid_forward_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::log_sigmoid_forward::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor log_sigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & buffer) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(buffer, cur_level)) {
|
|
return at::_ops::log_sigmoid_backward::call(grad_output, self, buffer);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor buffer_value;
|
|
optional<int64_t> buffer_bdim;
|
|
std::tie(buffer_value, buffer_bdim) = unwrapTensorAtLevel(buffer, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, buffer_value, buffer_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor rrelu_with_noise_generated_plumbing(const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) {
|
|
return at::_ops::rrelu_with_noise::call(self, noise, lower, upper, training, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor noise_value;
|
|
optional<int64_t> noise_bdim;
|
|
std::tie(noise_value, noise_bdim) = unwrapTensorAtLevel(noise, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, generator);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor rrelu_with_noise_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, bool self_is_result) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) {
|
|
return at::_ops::rrelu_with_noise_backward::call(grad_output, self, noise, lower, upper, training, self_is_result);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor noise_value;
|
|
optional<int64_t> noise_bdim;
|
|
std::tie(noise_value, noise_bdim) = unwrapTensorAtLevel(noise, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, self_is_result);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor & rrelu_with_noise__generated_plumbing(at::Tensor & self, const at::Tensor & noise, const at::Scalar & lower, const at::Scalar & upper, bool training, c10::optional<at::Generator> generator) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(noise, cur_level)) {
|
|
return at::_ops::rrelu_with_noise_::call(self, noise, lower, upper, training, generator);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor noise_value;
|
|
optional<int64_t> noise_bdim;
|
|
std::tie(noise_value, noise_bdim) = unwrapTensorAtLevel(noise, cur_level);
|
|
batch_rule(self_value, self_bdim, noise_value, noise_bdim, lower, upper, training, generator);
|
|
return self;
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor softplus_generated_plumbing(const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::softplus::call(self, beta, threshold);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, beta, threshold);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor softplus_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & beta, const at::Scalar & threshold) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::softplus_backward::call(grad_output, self, beta, threshold);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, beta, threshold);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor softshrink_generated_plumbing(const at::Tensor & self, const at::Scalar & lambd) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::softshrink::call(self, lambd);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, lambd);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor softshrink_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & lambd) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::softshrink_backward::call(grad_output, self, lambd);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, lambd);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::adaptive_avg_pool2d::call(self, output_size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor mkldnn_adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::mkldnn_adaptive_avg_pool2d::call(self, output_size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor mkldnn_adaptive_avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::mkldnn_adaptive_avg_pool2d_backward::call(grad_output, self);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _adaptive_avg_pool2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_adaptive_avg_pool2d::call(self, output_size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _adaptive_avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_adaptive_avg_pool2d_backward::call(grad_output, self);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor adaptive_avg_pool3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::adaptive_avg_pool3d::call(self, output_size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _adaptive_avg_pool3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_adaptive_avg_pool3d::call(self, output_size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _adaptive_avg_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_adaptive_avg_pool3d_backward::call(grad_output, self);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::adaptive_max_pool2d::call(self, output_size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor adaptive_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
|
|
return at::_ops::adaptive_max_pool2d_backward::call(grad_output, self, indices);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, indices_value, indices_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> adaptive_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef output_size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::adaptive_max_pool3d::call(self, output_size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor adaptive_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & indices) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
|
|
return at::_ops::adaptive_max_pool3d_backward::call(grad_output, self, indices);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, indices_value, indices_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor avg_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::avg_pool2d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor avg_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::avg_pool2d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor avg_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::avg_pool3d::call(self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor avg_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::avg_pool3d_backward::call(grad_output, self, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, ceil_mode, count_include_pad, divisor_override);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool2d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(random_samples, cur_level)) {
|
|
return at::_ops::fractional_max_pool2d::call(self, kernel_size, output_size, random_samples);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor random_samples_value;
|
|
optional<int64_t> random_samples_bdim;
|
|
std::tie(random_samples_value, random_samples_bdim) = unwrapTensorAtLevel(random_samples, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, kernel_size, output_size, random_samples_value, random_samples_bdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fractional_max_pool2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
|
|
return at::_ops::fractional_max_pool2d_backward::call(grad_output, self, kernel_size, output_size, indices);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, output_size, indices_value, indices_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> fractional_max_pool3d_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & random_samples) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(random_samples, cur_level)) {
|
|
return at::_ops::fractional_max_pool3d::call(self, kernel_size, output_size, random_samples);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor random_samples_value;
|
|
optional<int64_t> random_samples_bdim;
|
|
std::tie(random_samples_value, random_samples_bdim) = unwrapTensorAtLevel(random_samples, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, kernel_size, output_size, random_samples_value, random_samples_bdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fractional_max_pool3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
|
|
return at::_ops::fractional_max_pool3d_backward::call(grad_output, self, kernel_size, output_size, indices);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, output_size, indices_value, indices_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> max_pool2d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::max_pool2d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor max_pool2d_with_indices_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
|
|
return at::_ops::max_pool2d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode, indices_value, indices_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> max_pool3d_with_indices_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::max_pool3d_with_indices::call(self, kernel_size, stride, padding, dilation, ceil_mode);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor max_pool3d_with_indices_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
|
|
return at::_ops::max_pool3d_with_indices_backward::call(grad_output, self, kernel_size, stride, padding, dilation, ceil_mode, indices);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, kernel_size, stride, padding, dilation, ceil_mode, indices_value, indices_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor max_unpool2d_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
|
|
return at::_ops::max_unpool2d::call(self, indices, output_size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, output_size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor max_unpool3d_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
|
|
return at::_ops::max_unpool3d::call(self, indices, output_size, stride, padding);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, output_size, stride, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor reflection_pad1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::reflection_pad1d::call(self, padding);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor reflection_pad1d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::reflection_pad1d_backward::call(grad_output, self, padding);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor reflection_pad2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::reflection_pad2d::call(self, padding);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor reflection_pad2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::reflection_pad2d_backward::call(grad_output, self, padding);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor reflection_pad3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::reflection_pad3d::call(self, padding);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor reflection_pad3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::reflection_pad3d_backward::call(grad_output, self, padding);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor replication_pad1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::replication_pad1d::call(self, padding);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor replication_pad1d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::replication_pad1d_backward::call(grad_output, self, padding);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor replication_pad2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::replication_pad2d::call(self, padding);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor replication_pad2d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::replication_pad2d_backward::call(grad_output, self, padding);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor replication_pad3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::replication_pad3d::call(self, padding);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor replication_pad3d_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::replication_pad3d_backward::call(grad_output, self, padding);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _pad_circular_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_pad_circular::call(self, pad);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, pad);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _pad_enum_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, int64_t mode, c10::optional<double> value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_pad_enum::call(self, pad, mode, value);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, pad, mode, value);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor pad_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef pad, c10::string_view mode, c10::optional<double> value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::pad::call(self, pad, mode, value);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, pad, mode, value);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_linear1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level)) {
|
|
return at::_ops::upsample_linear1d_vec::call(input, output_size, align_corners, scale_factors);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_bilinear2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level)) {
|
|
return at::_ops::upsample_bilinear2d_vec::call(input, output_size, align_corners, scale_factors);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _upsample_bilinear2d_aa_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level)) {
|
|
return at::_ops::_upsample_bilinear2d_aa_vec::call(input, output_size, align_corners, scale_factors);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_trilinear3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level)) {
|
|
return at::_ops::upsample_trilinear3d_vec::call(input, output_size, align_corners, scale_factors);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_bicubic2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level)) {
|
|
return at::_ops::upsample_bicubic2d_vec::call(input, output_size, align_corners, scale_factors);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _upsample_bicubic2d_aa_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, bool align_corners, c10::optional<at::ArrayRef<double>> scale_factors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level)) {
|
|
return at::_ops::_upsample_bicubic2d_aa_vec::call(input, output_size, align_corners, scale_factors);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, output_size, align_corners, scale_factors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_nearest1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level)) {
|
|
return at::_ops::upsample_nearest1d_vec::call(input, output_size, scale_factors);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _upsample_nearest_exact1d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level)) {
|
|
return at::_ops::_upsample_nearest_exact1d_vec::call(input, output_size, scale_factors);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_nearest2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level)) {
|
|
return at::_ops::upsample_nearest2d_vec::call(input, output_size, scale_factors);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _upsample_nearest_exact2d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level)) {
|
|
return at::_ops::_upsample_nearest_exact2d_vec::call(input, output_size, scale_factors);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_nearest3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level)) {
|
|
return at::_ops::upsample_nearest3d_vec::call(input, output_size, scale_factors);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _upsample_nearest_exact3d_vec_generated_plumbing(const at::Tensor & input, at::OptionalSymIntArrayRef output_size, c10::optional<at::ArrayRef<double>> scale_factors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level)) {
|
|
return at::_ops::_upsample_nearest_exact3d_vec::call(input, output_size, scale_factors);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, output_size, scale_factors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_linear1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::upsample_linear1d::call(self, output_size, align_corners, scales);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_linear1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level)) {
|
|
return at::_ops::upsample_linear1d_backward::call(grad_output, output_size, input_size, align_corners, scales);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_bilinear2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::upsample_bilinear2d::call(self, output_size, align_corners, scales_h, scales_w);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_bilinear2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level)) {
|
|
return at::_ops::upsample_bilinear2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _upsample_bilinear2d_aa_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_upsample_bilinear2d_aa::call(self, output_size, align_corners, scales_h, scales_w);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _upsample_bilinear2d_aa_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level)) {
|
|
return at::_ops::_upsample_bilinear2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_bicubic2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::upsample_bicubic2d::call(self, output_size, align_corners, scales_h, scales_w);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_bicubic2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level)) {
|
|
return at::_ops::upsample_bicubic2d_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _upsample_bicubic2d_aa_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_upsample_bicubic2d_aa::call(self, output_size, align_corners, scales_h, scales_w);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _upsample_bicubic2d_aa_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level)) {
|
|
return at::_ops::_upsample_bicubic2d_aa_backward::call(grad_output, output_size, input_size, align_corners, scales_h, scales_w);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_trilinear3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::upsample_trilinear3d::call(self, output_size, align_corners, scales_d, scales_h, scales_w);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size, align_corners, scales_d, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_trilinear3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level)) {
|
|
return at::_ops::upsample_trilinear3d_backward::call(grad_output, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, align_corners, scales_d, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_nearest1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::upsample_nearest1d::call(self, output_size, scales);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size, scales);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _upsample_nearest_exact1d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_upsample_nearest_exact1d::call(self, output_size, scales);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size, scales);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_nearest1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level)) {
|
|
return at::_ops::upsample_nearest1d_backward::call(grad_output, output_size, input_size, scales);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _upsample_nearest_exact1d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level)) {
|
|
return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, output_size, input_size, scales);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_nearest2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::upsample_nearest2d::call(self, output_size, scales_h, scales_w);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _upsample_nearest_exact2d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_upsample_nearest_exact2d::call(self, output_size, scales_h, scales_w);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_nearest2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level)) {
|
|
return at::_ops::upsample_nearest2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _upsample_nearest_exact2d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level)) {
|
|
return at::_ops::_upsample_nearest_exact2d_backward::call(grad_output, output_size, input_size, scales_h, scales_w);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_nearest3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::upsample_nearest3d::call(self, output_size, scales_d, scales_h, scales_w);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size, scales_d, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _upsample_nearest_exact3d_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_upsample_nearest_exact3d::call(self, output_size, scales_d, scales_h, scales_w);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size, scales_d, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor upsample_nearest3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level)) {
|
|
return at::_ops::upsample_nearest3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_d, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _upsample_nearest_exact3d_backward_generated_plumbing(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional<double> scales_d, c10::optional<double> scales_h, c10::optional<double> scales_w) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level)) {
|
|
return at::_ops::_upsample_nearest_exact3d_backward::call(grad_output, output_size, input_size, scales_d, scales_h, scales_w);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, output_size, input_size, scales_d, scales_h, scales_w);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor sigmoid_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
|
|
return at::_ops::sigmoid_backward::call(grad_output, output);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor output_value;
|
|
optional<int64_t> output_bdim;
|
|
std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor logit_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, c10::optional<double> eps) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::logit_backward::call(grad_output, self, eps);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, eps);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor tanh_backward_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & output) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(output, cur_level)) {
|
|
return at::_ops::tanh_backward::call(grad_output, output);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor output_value;
|
|
optional<int64_t> output_bdim;
|
|
std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, output_value, output_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor slow_conv_transpose2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::slow_conv_transpose2d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, output_padding, dilation);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor slow_conv_transpose3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, c10::SymIntArrayRef dilation) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::slow_conv_transpose3d::call(self, weight, kernel_size, bias, stride, padding, output_padding, dilation);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, output_padding, dilation);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor thnn_conv2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::thnn_conv2d::call(self, weight, kernel_size, bias, stride, padding);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _slow_conv2d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::_slow_conv2d_forward::call(self, weight, kernel_size, bias, stride, padding);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _slow_conv2d_backward_output_mask_generated_plumbing(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array<bool,3> output_mask) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level)) {
|
|
return at::_ops::_slow_conv2d_backward_output_mask::call(grad_output, self, weight, kernel_size, stride, padding, output_mask);
|
|
}
|
|
Tensor grad_output_value;
|
|
optional<int64_t> grad_output_bdim;
|
|
std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output, cur_level);
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
auto results = batch_rule(grad_output_value, grad_output_bdim, self_value, self_bdim, weight_value, weight_bdim, kernel_size, stride, padding, output_mask);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _conv_depthwise2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::_conv_depthwise2d::call(self, weight, kernel_size, bias, stride, padding, dilation);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor conv_depthwise3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::conv_depthwise3d::call(self, weight, kernel_size, bias, stride, padding, dilation);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor slow_conv3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::slow_conv3d::call(self, weight, kernel_size, bias, stride, padding);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor slow_conv3d_forward_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::slow_conv3d_forward::call(self, weight, kernel_size, bias, stride, padding);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor slow_conv_dilated2d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::slow_conv_dilated2d::call(self, weight, kernel_size, bias, stride, padding, dilation);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor slow_conv_dilated3d_generated_plumbing(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const c10::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level)) {
|
|
return at::_ops::slow_conv_dilated3d::call(self, weight, kernel_size, bias, stride, padding, dilation);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor weight_value;
|
|
optional<int64_t> weight_bdim;
|
|
std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight, cur_level);
|
|
optional<Tensor> bias_value;
|
|
optional<int64_t> bias_bdim;
|
|
if (bias) {
|
|
std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, weight_value, weight_bdim, kernel_size, bias_value, bias_bdim, stride, padding, dilation);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor col2im_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::col2im::call(self, output_size, kernel_size, dilation, padding, stride);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, output_size, kernel_size, dilation, padding, stride);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor column_stack_generated_plumbing(at::TensorList tensors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::column_stack::call(tensors);
|
|
}
|
|
|
|
auto results = batch_rule(tensors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor im2col_generated_plumbing(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::im2col::call(self, kernel_size, dilation, padding, stride);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, kernel_size, dilation, padding, stride);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor isfinite_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::isfinite::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor isinf_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::isinf::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void record_stream_generated_plumbing(at::Tensor & self, at::Stream s) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::record_stream::call(self, s);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, s);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor isposinf_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::isposinf::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor isneginf_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::isneginf::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _add_batch_dim_generated_plumbing(const at::Tensor & self, int64_t batch_dim, int64_t level) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_add_batch_dim::call(self, batch_dim, level);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, batch_dim, level);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _remove_batch_dim_generated_plumbing(const at::Tensor & self, int64_t level, int64_t batch_size, int64_t out_dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_remove_batch_dim::call(self, level, batch_size, out_dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, level, batch_size, out_dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_entr_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_entr::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_ndtri_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_ndtri::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_log_ndtr_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_log_ndtr::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_expm1_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_expm1::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_exp2_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_exp2::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_psi_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_psi::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_digamma_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_digamma::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_gammaln_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_gammaln::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_erf_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_erf::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_erfc_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_erfc::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_erfcx_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_erfcx::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_erfinv_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_erfinv::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_ndtr_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_ndtr::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_xlog1py_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::special_xlog1py::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_xlog1py_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::special_xlog1py_self_scalar::call(self, other);
|
|
}
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_xlog1py_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_xlog1py_other_scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_xlogy_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::special_xlogy::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_xlogy_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::special_xlogy_self_scalar::call(self, other);
|
|
}
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_xlogy_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_xlogy_other_scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_zeta_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::special_zeta::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_zeta_self_scalar_generated_plumbing(const at::Scalar & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::special_zeta_self_scalar::call(self, other);
|
|
}
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_zeta_other_scalar_generated_plumbing(const at::Tensor & self, const at::Scalar & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_zeta_other_scalar::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_i0_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_i0::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_i0e_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_i0e::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_i1_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_i1::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_i1e_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_i1e::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_logit_generated_plumbing(const at::Tensor & self, c10::optional<double> eps) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_logit::call(self, eps);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, eps);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_polygamma_generated_plumbing(int64_t n, const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_polygamma::call(n, self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(n, self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_logsumexp_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim, bool keepdim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_logsumexp::call(self, dim, keepdim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, keepdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_expit_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_expit::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_sinc_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_sinc::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_round_generated_plumbing(const at::Tensor & self, int64_t decimals) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_round::call(self, decimals);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, decimals);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_log1p_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_log1p::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_log_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_log_softmax::call(self, dim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_gammainc_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::special_gammainc::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_gammaincc_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::special_gammaincc::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_multigammaln_generated_plumbing(const at::Tensor & self, int64_t p) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_multigammaln::call(self, p);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, p);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor special_softmax_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::special_softmax::call(self, dim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_fft_generated_plumbing(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_fft::call(self, n, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, n, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_ifft_generated_plumbing(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_ifft::call(self, n, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, n, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_rfft_generated_plumbing(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_rfft::call(self, n, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, n, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_irfft_generated_plumbing(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_irfft::call(self, n, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, n, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_hfft_generated_plumbing(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_hfft::call(self, n, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, n, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_ihfft_generated_plumbing(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_ihfft::call(self, n, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, n, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_fft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_fft2::call(self, s, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, s, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_ifft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_ifft2::call(self, s, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, s, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_rfft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_rfft2::call(self, s, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, s, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_irfft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_irfft2::call(self, s, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, s, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_hfft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_hfft2::call(self, s, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, s, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_ihfft2_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::IntArrayRef dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_ihfft2::call(self, s, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, s, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_fftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_fftn::call(self, s, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, s, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_ifftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_ifftn::call(self, s, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, s, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_rfftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_rfftn::call(self, s, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, s, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_irfftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_irfftn::call(self, s, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, s, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_hfftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_hfftn::call(self, s, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, s, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_ihfftn_generated_plumbing(const at::Tensor & self, at::OptionalSymIntArrayRef s, at::OptionalIntArrayRef dim, c10::optional<c10::string_view> norm) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_ihfftn::call(self, s, dim, norm);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, s, dim, norm);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_fftshift_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_fftshift::call(self, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor fft_ifftshift_generated_plumbing(const at::Tensor & self, at::OptionalIntArrayRef dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::fft_ifftshift::call(self, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> linalg_cholesky_ex_generated_plumbing(const at::Tensor & self, bool upper, bool check_errors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_cholesky_ex::call(self, upper, check_errors);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, upper, check_errors);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_cholesky_generated_plumbing(const at::Tensor & self, bool upper) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_cholesky::call(self, upper);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, upper);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_cross_generated_plumbing(const at::Tensor & self, const at::Tensor & other, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::linalg_cross::call(self, other, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> linalg_lu_factor_generated_plumbing(const at::Tensor & A, bool pivot) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::linalg_lu_factor::call(A, pivot);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim, pivot);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_factor_ex_generated_plumbing(const at::Tensor & A, bool pivot, bool check_errors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::linalg_lu_factor_ex::call(A, pivot, check_errors);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim, pivot, check_errors);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_lu_generated_plumbing(const at::Tensor & A, bool pivot) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::linalg_lu::call(A, pivot);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim, pivot);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_lu_solve_generated_plumbing(const at::Tensor & LU, const at::Tensor & pivots, const at::Tensor & B, bool left, bool adjoint) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(LU, cur_level) && !isBatchedAtLevel(pivots, cur_level) && !isBatchedAtLevel(B, cur_level)) {
|
|
return at::_ops::linalg_lu_solve::call(LU, pivots, B, left, adjoint);
|
|
}
|
|
Tensor LU_value;
|
|
optional<int64_t> LU_bdim;
|
|
std::tie(LU_value, LU_bdim) = unwrapTensorAtLevel(LU, cur_level);
|
|
Tensor pivots_value;
|
|
optional<int64_t> pivots_bdim;
|
|
std::tie(pivots_value, pivots_bdim) = unwrapTensorAtLevel(pivots, cur_level);
|
|
Tensor B_value;
|
|
optional<int64_t> B_bdim;
|
|
std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
|
|
auto results = batch_rule(LU_value, LU_bdim, pivots_value, pivots_bdim, B_value, B_bdim, left, adjoint);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_det_generated_plumbing(const at::Tensor & A) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::_linalg_det::call(A);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_det_generated_plumbing(const at::Tensor & A) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::linalg_det::call(A);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor det_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::det::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_ldl_factor_ex_generated_plumbing(const at::Tensor & self, bool hermitian, bool check_errors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_ldl_factor_ex::call(self, hermitian, check_errors);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, hermitian, check_errors);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor_generated_plumbing(const at::Tensor & self, bool hermitian) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_ldl_factor::call(self, hermitian);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, hermitian);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_ldl_solve_generated_plumbing(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(LD, cur_level) && !isBatchedAtLevel(pivots, cur_level) && !isBatchedAtLevel(B, cur_level)) {
|
|
return at::_ops::linalg_ldl_solve::call(LD, pivots, B, hermitian);
|
|
}
|
|
Tensor LD_value;
|
|
optional<int64_t> LD_bdim;
|
|
std::tie(LD_value, LD_bdim) = unwrapTensorAtLevel(LD, cur_level);
|
|
Tensor pivots_value;
|
|
optional<int64_t> pivots_bdim;
|
|
std::tie(pivots_value, pivots_bdim) = unwrapTensorAtLevel(pivots, cur_level);
|
|
Tensor B_value;
|
|
optional<int64_t> B_bdim;
|
|
std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
|
|
auto results = batch_rule(LD_value, LD_bdim, pivots_value, pivots_bdim, B_value, B_bdim, hermitian);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> linalg_lstsq_generated_plumbing(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(b, cur_level)) {
|
|
return at::_ops::linalg_lstsq::call(self, b, rcond, driver);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor b_value;
|
|
optional<int64_t> b_bdim;
|
|
std::tie(b_value, b_bdim) = unwrapTensorAtLevel(b, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, b_value, b_bdim, rcond, driver);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_matmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::linalg_matmul::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_vecdot_generated_plumbing(const at::Tensor & x, const at::Tensor & y, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(y, cur_level)) {
|
|
return at::_ops::linalg_vecdot::call(x, y, dim);
|
|
}
|
|
Tensor x_value;
|
|
optional<int64_t> x_bdim;
|
|
std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
|
|
Tensor y_value;
|
|
optional<int64_t> y_bdim;
|
|
std::tie(y_value, y_bdim) = unwrapTensorAtLevel(y, cur_level);
|
|
auto results = batch_rule(x_value, x_bdim, y_value, y_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_matrix_exp_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_matrix_exp::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_slogdet_generated_plumbing(const at::Tensor & A) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::_linalg_slogdet::call(A);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> linalg_slogdet_generated_plumbing(const at::Tensor & A) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::linalg_slogdet::call(A);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> slogdet_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::slogdet::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor logdet_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::logdet::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> linalg_eig_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_eig::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _linalg_eigvals_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_linalg_eigvals::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_eigvals_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_eigvals::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _linalg_eigh_generated_plumbing(const at::Tensor & A, c10::string_view UPLO, bool compute_v) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::_linalg_eigh::call(A, UPLO, compute_v);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim, UPLO, compute_v);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> linalg_eigh_generated_plumbing(const at::Tensor & self, c10::string_view UPLO) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_eigh::call(self, UPLO);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, UPLO);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_eigvalsh_generated_plumbing(const at::Tensor & self, c10::string_view UPLO) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_eigvalsh::call(self, UPLO);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, UPLO);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_householder_product_generated_plumbing(const at::Tensor & input, const at::Tensor & tau) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(tau, cur_level)) {
|
|
return at::_ops::linalg_householder_product::call(input, tau);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor tau_value;
|
|
optional<int64_t> tau_bdim;
|
|
std::tie(tau_value, tau_bdim) = unwrapTensorAtLevel(tau, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, tau_value, tau_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> linalg_inv_ex_generated_plumbing(const at::Tensor & A, bool check_errors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::linalg_inv_ex::call(A, check_errors);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim, check_errors);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_inv_generated_plumbing(const at::Tensor & A) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::linalg_inv::call(A);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor inverse_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::inverse::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor inner_generated_plumbing(const at::Tensor & self, const at::Tensor & other) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::inner::call(self, other);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor outer_generated_plumbing(const at::Tensor & self, const at::Tensor & vec2) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
|
|
return at::_ops::outer::call(self, vec2);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor vec2_value;
|
|
optional<int64_t> vec2_bdim;
|
|
std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, vec2_value, vec2_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor ger_generated_plumbing(const at::Tensor & self, const at::Tensor & vec2) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(vec2, cur_level)) {
|
|
return at::_ops::ger::call(self, vec2);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor vec2_value;
|
|
optional<int64_t> vec2_bdim;
|
|
std::tie(vec2_value, vec2_bdim) = unwrapTensorAtLevel(vec2, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, vec2_value, vec2_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_norm_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_norm::call(self, ord, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_norm_ord_str_generated_plumbing(const at::Tensor & self, c10::string_view ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_norm_ord_str::call(self, ord, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_vector_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & ord, at::OptionalIntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_vector_norm::call(self, ord, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_matrix_norm_generated_plumbing(const at::Tensor & self, const at::Scalar & ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_matrix_norm::call(self, ord, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_matrix_norm_str_ord_generated_plumbing(const at::Tensor & self, c10::string_view ord, at::IntArrayRef dim, bool keepdim, c10::optional<at::ScalarType> dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_matrix_norm_str_ord::call(self, ord, dim, keepdim, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, ord, dim, keepdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd_generated_plumbing(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::_linalg_svd::call(A, full_matrices, compute_uv, driver);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim, full_matrices, compute_uv, driver);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> linalg_svd_generated_plumbing(const at::Tensor & A, bool full_matrices, c10::optional<c10::string_view> driver) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::linalg_svd::call(A, full_matrices, driver);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim, full_matrices, driver);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_svdvals_generated_plumbing(const at::Tensor & A, c10::optional<c10::string_view> driver) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::linalg_svdvals::call(A, driver);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim, driver);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_cond_generated_plumbing(const at::Tensor & self, const c10::optional<at::Scalar> & p) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_cond::call(self, p);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, p);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_cond_p_str_generated_plumbing(const at::Tensor & self, c10::string_view p) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_cond_p_str::call(self, p);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, p);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_pinv_atol_rtol_tensor_generated_plumbing(const at::Tensor & self, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(atol, cur_level) && !isBatchedAtLevel(rtol, cur_level)) {
|
|
return at::_ops::linalg_pinv_atol_rtol_tensor::call(self, atol, rtol, hermitian);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
optional<Tensor> atol_value;
|
|
optional<int64_t> atol_bdim;
|
|
if (atol) {
|
|
std::tie(atol_value, atol_bdim) = unwrapTensorAtLevel(atol.value(), cur_level);
|
|
}
|
|
optional<Tensor> rtol_value;
|
|
optional<int64_t> rtol_bdim;
|
|
if (rtol) {
|
|
std::tie(rtol_value, rtol_bdim) = unwrapTensorAtLevel(rtol.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self_value, self_bdim, atol_value, atol_bdim, rtol_value, rtol_bdim, hermitian);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_pinv_atol_rtol_float_generated_plumbing(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_pinv_atol_rtol_float::call(self, atol, rtol, hermitian);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, atol, rtol, hermitian);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_pinv_generated_plumbing(const at::Tensor & self, double rcond, bool hermitian) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_pinv::call(self, rcond, hermitian);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, rcond, hermitian);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_pinv_rcond_tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & rcond, bool hermitian) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(rcond, cur_level)) {
|
|
return at::_ops::linalg_pinv_rcond_tensor::call(self, rcond, hermitian);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor rcond_value;
|
|
optional<int64_t> rcond_bdim;
|
|
std::tie(rcond_value, rcond_bdim) = unwrapTensorAtLevel(rcond, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, rcond_value, rcond_bdim, hermitian);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _linalg_solve_ex_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
|
|
return at::_ops::_linalg_solve_ex::call(A, B, left, check_errors);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
Tensor B_value;
|
|
optional<int64_t> B_bdim;
|
|
std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left, check_errors);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> linalg_solve_ex_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left, bool check_errors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
|
|
return at::_ops::linalg_solve_ex::call(A, B, left, check_errors);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
Tensor B_value;
|
|
optional<int64_t> B_bdim;
|
|
std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left, check_errors);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_solve_generated_plumbing(const at::Tensor & A, const at::Tensor & B, bool left) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level) && !isBatchedAtLevel(B, cur_level)) {
|
|
return at::_ops::linalg_solve::call(A, B, left);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
Tensor B_value;
|
|
optional<int64_t> B_bdim;
|
|
std::tie(B_value, B_bdim) = unwrapTensorAtLevel(B, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim, B_value, B_bdim, left);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_tensorinv_generated_plumbing(const at::Tensor & self, int64_t ind) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_tensorinv::call(self, ind);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, ind);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_tensorsolve_generated_plumbing(const at::Tensor & self, const at::Tensor & other, at::OptionalIntArrayRef dims) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::linalg_tensorsolve::call(self, other, dims);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, dims);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> linalg_qr_generated_plumbing(const at::Tensor & A, c10::string_view mode) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(A, cur_level)) {
|
|
return at::_ops::linalg_qr::call(A, mode);
|
|
}
|
|
Tensor A_value;
|
|
optional<int64_t> A_bdim;
|
|
std::tie(A_value, A_bdim) = unwrapTensorAtLevel(A, cur_level);
|
|
auto results = batch_rule(A_value, A_bdim, mode);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_matrix_power_generated_plumbing(const at::Tensor & self, int64_t n) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_matrix_power::call(self, n);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, n);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_matrix_rank_atol_rtol_tensor_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & atol, const c10::optional<at::Tensor> & rtol, bool hermitian) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(atol, cur_level) && !isBatchedAtLevel(rtol, cur_level)) {
|
|
return at::_ops::linalg_matrix_rank_atol_rtol_tensor::call(input, atol, rtol, hermitian);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
optional<Tensor> atol_value;
|
|
optional<int64_t> atol_bdim;
|
|
if (atol) {
|
|
std::tie(atol_value, atol_bdim) = unwrapTensorAtLevel(atol.value(), cur_level);
|
|
}
|
|
optional<Tensor> rtol_value;
|
|
optional<int64_t> rtol_bdim;
|
|
if (rtol) {
|
|
std::tie(rtol_value, rtol_bdim) = unwrapTensorAtLevel(rtol.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(input_value, input_bdim, atol_value, atol_bdim, rtol_value, rtol_bdim, hermitian);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_matrix_rank_atol_rtol_float_generated_plumbing(const at::Tensor & self, c10::optional<double> atol, c10::optional<double> rtol, bool hermitian) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_matrix_rank_atol_rtol_float::call(self, atol, rtol, hermitian);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, atol, rtol, hermitian);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_matrix_rank_generated_plumbing(const at::Tensor & self, double tol, bool hermitian) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::linalg_matrix_rank::call(self, tol, hermitian);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, tol, hermitian);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_matrix_rank_tol_tensor_generated_plumbing(const at::Tensor & input, const at::Tensor & tol, bool hermitian) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(tol, cur_level)) {
|
|
return at::_ops::linalg_matrix_rank_tol_tensor::call(input, tol, hermitian);
|
|
}
|
|
Tensor input_value;
|
|
optional<int64_t> input_bdim;
|
|
std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
|
|
Tensor tol_value;
|
|
optional<int64_t> tol_bdim;
|
|
std::tie(tol_value, tol_bdim) = unwrapTensorAtLevel(tol, cur_level);
|
|
auto results = batch_rule(input_value, input_bdim, tol_value, tol_bdim, hermitian);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor linalg_multi_dot_generated_plumbing(at::TensorList tensors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::linalg_multi_dot::call(tensors);
|
|
}
|
|
|
|
auto results = batch_rule(tensors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor nested_to_padded_tensor_generated_plumbing(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::nested_to_padded_tensor::call(self, padding, output_size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, padding, output_size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _test_serialization_subcmul_generated_plumbing(const at::Tensor & self, const at::Tensor & other, const at::Scalar & alpha) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level)) {
|
|
return at::_ops::_test_serialization_subcmul::call(self, other, alpha);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, other_value, other_bdim, alpha);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _test_parallel_materialize_generated_plumbing(const at::Tensor & self, int64_t num_parallel, bool skip_first) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_test_parallel_materialize::call(self, num_parallel, skip_first);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, num_parallel, skip_first);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _test_optional_intlist_generated_plumbing(const at::Tensor & values, at::OptionalIntArrayRef addends) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_test_optional_intlist::call(values, addends);
|
|
}
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(values_value, values_bdim, addends);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _test_optional_filled_intlist_generated_plumbing(const at::Tensor & values, at::OptionalIntArrayRef addends) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_test_optional_filled_intlist::call(values, addends);
|
|
}
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(values_value, values_bdim, addends);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _test_optional_floatlist_generated_plumbing(const at::Tensor & values, c10::optional<at::ArrayRef<double>> addends) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(values, cur_level)) {
|
|
return at::_ops::_test_optional_floatlist::call(values, addends);
|
|
}
|
|
Tensor values_value;
|
|
optional<int64_t> values_bdim;
|
|
std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
|
|
auto results = batch_rule(values_value, values_bdim, addends);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _test_string_default_generated_plumbing(const at::Tensor & dummy, c10::string_view a, c10::string_view b) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(dummy, cur_level)) {
|
|
return at::_ops::_test_string_default::call(dummy, a, b);
|
|
}
|
|
Tensor dummy_value;
|
|
optional<int64_t> dummy_bdim;
|
|
std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level);
|
|
auto results = batch_rule(dummy_value, dummy_bdim, a, b);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _test_ambiguous_defaults_a_generated_plumbing(const at::Tensor & dummy, int64_t a, int64_t b) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(dummy, cur_level)) {
|
|
return at::_ops::_test_ambiguous_defaults_a::call(dummy, a, b);
|
|
}
|
|
Tensor dummy_value;
|
|
optional<int64_t> dummy_bdim;
|
|
std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level);
|
|
auto results = batch_rule(dummy_value, dummy_bdim, a, b);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _test_ambiguous_defaults_b_generated_plumbing(const at::Tensor & dummy, int64_t a, c10::string_view b) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(dummy, cur_level)) {
|
|
return at::_ops::_test_ambiguous_defaults_b::call(dummy, a, b);
|
|
}
|
|
Tensor dummy_value;
|
|
optional<int64_t> dummy_bdim;
|
|
std::tie(dummy_value, dummy_bdim) = unwrapTensorAtLevel(dummy, cur_level);
|
|
auto results = batch_rule(dummy_value, dummy_bdim, a, b);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _test_warn_in_autograd_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_test_warn_in_autograd::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _test_autograd_multiple_dispatch_fullcoverage_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_test_autograd_multiple_dispatch_fullcoverage::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _test_autograd_multiple_dispatch_ntonly_generated_plumbing(const at::Tensor & self, bool b) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_test_autograd_multiple_dispatch_ntonly::call(self, b);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, b);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _test_autograd_multiple_dispatch_view_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_test_autograd_multiple_dispatch_view::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _test_autograd_multiple_dispatch_view_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_test_autograd_multiple_dispatch_view_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor segment_reduce_generated_plumbing(const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & indices, const c10::optional<at::Tensor> & offsets, int64_t axis, bool unsafe, const c10::optional<at::Scalar> & initial) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
|
|
return at::_ops::segment_reduce::call(data, reduce, lengths, indices, offsets, axis, unsafe, initial);
|
|
}
|
|
Tensor data_value;
|
|
optional<int64_t> data_bdim;
|
|
std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
|
|
optional<Tensor> lengths_value;
|
|
optional<int64_t> lengths_bdim;
|
|
if (lengths) {
|
|
std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level);
|
|
}
|
|
optional<Tensor> indices_value;
|
|
optional<int64_t> indices_bdim;
|
|
if (indices) {
|
|
std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices.value(), cur_level);
|
|
}
|
|
optional<Tensor> offsets_value;
|
|
optional<int64_t> offsets_bdim;
|
|
if (offsets) {
|
|
std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(data_value, data_bdim, reduce, lengths_value, lengths_bdim, indices_value, indices_bdim, offsets_value, offsets_bdim, axis, unsafe, initial);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _segment_reduce_backward_generated_plumbing(const at::Tensor & grad, const at::Tensor & output, const at::Tensor & data, c10::string_view reduce, const c10::optional<at::Tensor> & lengths, const c10::optional<at::Tensor> & offsets, int64_t axis, const c10::optional<at::Scalar> & initial) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(data, cur_level) && !isBatchedAtLevel(lengths, cur_level) && !isBatchedAtLevel(offsets, cur_level)) {
|
|
return at::_ops::_segment_reduce_backward::call(grad, output, data, reduce, lengths, offsets, axis, initial);
|
|
}
|
|
Tensor grad_value;
|
|
optional<int64_t> grad_bdim;
|
|
std::tie(grad_value, grad_bdim) = unwrapTensorAtLevel(grad, cur_level);
|
|
Tensor output_value;
|
|
optional<int64_t> output_bdim;
|
|
std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
|
|
Tensor data_value;
|
|
optional<int64_t> data_bdim;
|
|
std::tie(data_value, data_bdim) = unwrapTensorAtLevel(data, cur_level);
|
|
optional<Tensor> lengths_value;
|
|
optional<int64_t> lengths_bdim;
|
|
if (lengths) {
|
|
std::tie(lengths_value, lengths_bdim) = unwrapTensorAtLevel(lengths.value(), cur_level);
|
|
}
|
|
optional<Tensor> offsets_value;
|
|
optional<int64_t> offsets_bdim;
|
|
if (offsets) {
|
|
std::tie(offsets_value, offsets_bdim) = unwrapTensorAtLevel(offsets.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(grad_value, grad_bdim, output_value, output_bdim, data_value, data_bdim, reduce, lengths_value, lengths_bdim, offsets_value, offsets_bdim, axis, initial);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor pad_sequence_generated_plumbing(at::TensorList sequences, bool batch_first, double padding_value) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(sequences, cur_level)) {
|
|
return at::_ops::pad_sequence::call(sequences, batch_first, padding_value);
|
|
}
|
|
|
|
auto results = batch_rule(sequences, batch_first, padding_value);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor flatten_dense_tensors_generated_plumbing(at::TensorList tensors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::flatten_dense_tensors::call(tensors);
|
|
}
|
|
|
|
auto results = batch_rule(tensors);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> unflatten_dense_tensors_generated_plumbing(const at::Tensor & flat, at::TensorList tensors) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(flat, cur_level) && !isBatchedAtLevel(tensors, cur_level)) {
|
|
return at::_ops::unflatten_dense_tensors::call(flat, tensors);
|
|
}
|
|
Tensor flat_value;
|
|
optional<int64_t> flat_bdim;
|
|
std::tie(flat_value, flat_bdim) = unwrapTensorAtLevel(flat, cur_level);
|
|
auto results = batch_rule(flat_value, flat_bdim, tensors);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _nested_tensor_from_tensor_list_generated_plumbing(at::TensorList list, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(list, cur_level)) {
|
|
return at::_ops::_nested_tensor_from_tensor_list::call(list, dtype, layout, device, pin_memory);
|
|
}
|
|
|
|
auto results = batch_rule(list, dtype, layout, device, pin_memory);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _fw_primal_copy_generated_plumbing(const at::Tensor & self, int64_t level) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_fw_primal_copy::call(self, level);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, level);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _make_dual_copy_generated_plumbing(const at::Tensor & primal, const at::Tensor & tangent, int64_t level) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(primal, cur_level) && !isBatchedAtLevel(tangent, cur_level)) {
|
|
return at::_ops::_make_dual_copy::call(primal, tangent, level);
|
|
}
|
|
Tensor primal_value;
|
|
optional<int64_t> primal_bdim;
|
|
std::tie(primal_value, primal_bdim) = unwrapTensorAtLevel(primal, cur_level);
|
|
Tensor tangent_value;
|
|
optional<int64_t> tangent_bdim;
|
|
std::tie(tangent_value, tangent_bdim) = unwrapTensorAtLevel(tangent, cur_level);
|
|
auto results = batch_rule(primal_value, primal_bdim, tangent_value, tangent_bdim, level);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor view_as_real_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::view_as_real_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor view_as_complex_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::view_as_complex_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _conj_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_conj_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _neg_view_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_neg_view_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor as_strided_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride, c10::optional<c10::SymInt> storage_offset) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::as_strided_copy::call(self, size, stride, storage_offset);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, size, stride, storage_offset);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _sparse_broadcast_to_copy_generated_plumbing(const at::Tensor & self, at::IntArrayRef size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_sparse_broadcast_to_copy::call(self, size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor diagonal_copy_generated_plumbing(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::diagonal_copy::call(self, offset, dim1, dim2);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, offset, dim1, dim2);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor expand_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, bool implicit) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::expand_copy::call(self, size, implicit);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, size, implicit);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor permute_copy_generated_plumbing(const at::Tensor & self, at::IntArrayRef dims) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::permute_copy::call(self, dims);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dims);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _reshape_alias_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_reshape_alias_copy::call(self, size, stride);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, size, stride);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor select_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim, c10::SymInt index) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::select_copy_int::call(self, dim, index);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, index);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor detach_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::detach_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor slice_copy_Tensor_generated_plumbing(const at::Tensor & self, int64_t dim, c10::optional<c10::SymInt> start, c10::optional<c10::SymInt> end, c10::SymInt step) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::slice_copy_Tensor::call(self, dim, start, end, step);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim, start, end, step);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> split_copy_Tensor_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::split_copy_Tensor::call(self, split_size, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, split_size, dim);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> split_with_sizes_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::split_with_sizes_copy::call(self, split_sizes, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, split_sizes, dim);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor squeeze_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::squeeze_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor squeeze_copy_dim_generated_plumbing(const at::Tensor & self, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::squeeze_copy_dim::call(self, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor squeeze_copy_dims_generated_plumbing(const at::Tensor & self, at::IntArrayRef dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::squeeze_copy_dims::call(self, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor t_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::t_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor transpose_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim0, int64_t dim1) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::transpose_copy_int::call(self, dim0, dim1);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim0, dim1);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor unsqueeze_copy_generated_plumbing(const at::Tensor & self, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::unsqueeze_copy::call(self, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _indices_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_indices_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _values_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_values_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor indices_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::indices_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor values_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::values_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor crow_indices_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::crow_indices_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor col_indices_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::col_indices_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor ccol_indices_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::ccol_indices_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor row_indices_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::row_indices_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> unbind_copy_int_generated_plumbing(const at::Tensor & self, int64_t dim) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::unbind_copy_int::call(self, dim);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dim);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void unbind_copy_int_out_generated_plumbing(const at::Tensor & self, int64_t dim, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::unbind_copy_int_out::call(self, dim, out);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, dim, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void split_copy_Tensor_out_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::split_copy_Tensor_out::call(self, split_size, dim, out);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, split_size, dim, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void split_with_sizes_copy_out_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::split_with_sizes_copy_out::call(self, split_sizes, dim, out);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
batch_rule(self_value, self_bdim, split_sizes, dim, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor view_copy_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::view_copy::call(self, size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor view_copy_dtype_generated_plumbing(const at::Tensor & self, at::ScalarType dtype) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::view_copy_dtype::call(self, dtype);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dtype);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor unfold_copy_generated_plumbing(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::unfold_copy::call(self, dimension, size, step);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, dimension, size, step);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor alias_copy_generated_plumbing(const at::Tensor & self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::alias_copy::call(self);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor to_padded_tensor_generated_plumbing(const at::Tensor & self, double padding, at::OptionalSymIntArrayRef output_size) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::to_padded_tensor::call(self, padding, output_size);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, padding, output_size);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _nested_tensor_softmax_with_shape_generated_plumbing(const at::Tensor & self, const at::Tensor & query) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(query, cur_level)) {
|
|
return at::_ops::_nested_tensor_softmax_with_shape::call(self, query);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor query_value;
|
|
optional<int64_t> query_bdim;
|
|
std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, query_value, query_bdim);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _transformer_encoder_layer_fwd_generated_plumbing(const at::Tensor & src, int64_t embed_dim, int64_t num_heads, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, bool use_gelu, bool norm_first, double eps, const at::Tensor & norm_weight_1, const at::Tensor & norm_bias_1, const at::Tensor & norm_weight_2, const at::Tensor & norm_bias_2, const at::Tensor & ffn_weight_1, const at::Tensor & ffn_bias_1, const at::Tensor & ffn_weight_2, const at::Tensor & ffn_bias_2, const c10::optional<at::Tensor> & mask, c10::optional<int64_t> mask_type) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(src, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(norm_weight_1, cur_level) && !isBatchedAtLevel(norm_bias_1, cur_level) && !isBatchedAtLevel(norm_weight_2, cur_level) && !isBatchedAtLevel(norm_bias_2, cur_level) && !isBatchedAtLevel(ffn_weight_1, cur_level) && !isBatchedAtLevel(ffn_bias_1, cur_level) && !isBatchedAtLevel(ffn_weight_2, cur_level) && !isBatchedAtLevel(ffn_bias_2, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
|
|
return at::_ops::_transformer_encoder_layer_fwd::call(src, embed_dim, num_heads, qkv_weight, qkv_bias, proj_weight, proj_bias, use_gelu, norm_first, eps, norm_weight_1, norm_bias_1, norm_weight_2, norm_bias_2, ffn_weight_1, ffn_bias_1, ffn_weight_2, ffn_bias_2, mask, mask_type);
|
|
}
|
|
Tensor src_value;
|
|
optional<int64_t> src_bdim;
|
|
std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
|
|
Tensor qkv_weight_value;
|
|
optional<int64_t> qkv_weight_bdim;
|
|
std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
|
|
Tensor qkv_bias_value;
|
|
optional<int64_t> qkv_bias_bdim;
|
|
std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
|
|
Tensor proj_weight_value;
|
|
optional<int64_t> proj_weight_bdim;
|
|
std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
|
|
Tensor proj_bias_value;
|
|
optional<int64_t> proj_bias_bdim;
|
|
std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
|
|
Tensor norm_weight_1_value;
|
|
optional<int64_t> norm_weight_1_bdim;
|
|
std::tie(norm_weight_1_value, norm_weight_1_bdim) = unwrapTensorAtLevel(norm_weight_1, cur_level);
|
|
Tensor norm_bias_1_value;
|
|
optional<int64_t> norm_bias_1_bdim;
|
|
std::tie(norm_bias_1_value, norm_bias_1_bdim) = unwrapTensorAtLevel(norm_bias_1, cur_level);
|
|
Tensor norm_weight_2_value;
|
|
optional<int64_t> norm_weight_2_bdim;
|
|
std::tie(norm_weight_2_value, norm_weight_2_bdim) = unwrapTensorAtLevel(norm_weight_2, cur_level);
|
|
Tensor norm_bias_2_value;
|
|
optional<int64_t> norm_bias_2_bdim;
|
|
std::tie(norm_bias_2_value, norm_bias_2_bdim) = unwrapTensorAtLevel(norm_bias_2, cur_level);
|
|
Tensor ffn_weight_1_value;
|
|
optional<int64_t> ffn_weight_1_bdim;
|
|
std::tie(ffn_weight_1_value, ffn_weight_1_bdim) = unwrapTensorAtLevel(ffn_weight_1, cur_level);
|
|
Tensor ffn_bias_1_value;
|
|
optional<int64_t> ffn_bias_1_bdim;
|
|
std::tie(ffn_bias_1_value, ffn_bias_1_bdim) = unwrapTensorAtLevel(ffn_bias_1, cur_level);
|
|
Tensor ffn_weight_2_value;
|
|
optional<int64_t> ffn_weight_2_bdim;
|
|
std::tie(ffn_weight_2_value, ffn_weight_2_bdim) = unwrapTensorAtLevel(ffn_weight_2, cur_level);
|
|
Tensor ffn_bias_2_value;
|
|
optional<int64_t> ffn_bias_2_bdim;
|
|
std::tie(ffn_bias_2_value, ffn_bias_2_bdim) = unwrapTensorAtLevel(ffn_bias_2, cur_level);
|
|
optional<Tensor> mask_value;
|
|
optional<int64_t> mask_bdim;
|
|
if (mask) {
|
|
std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(src_value, src_bdim, embed_dim, num_heads, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, use_gelu, norm_first, eps, norm_weight_1_value, norm_weight_1_bdim, norm_bias_1_value, norm_bias_1_bdim, norm_weight_2_value, norm_weight_2_bdim, norm_bias_2_value, norm_bias_2_bdim, ffn_weight_1_value, ffn_weight_1_bdim, ffn_bias_1_value, ffn_bias_1_bdim, ffn_weight_2_value, ffn_weight_2_bdim, ffn_bias_2_value, ffn_bias_2_bdim, mask_value, mask_bdim, mask_type);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _native_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask, bool need_weights, bool average_attn_weights, c10::optional<int64_t> mask_type) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
|
|
return at::_ops::_native_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask, need_weights, average_attn_weights, mask_type);
|
|
}
|
|
Tensor query_value;
|
|
optional<int64_t> query_bdim;
|
|
std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
|
|
Tensor key_value;
|
|
optional<int64_t> key_bdim;
|
|
std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
|
|
Tensor value_value;
|
|
optional<int64_t> value_bdim;
|
|
std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
|
|
Tensor qkv_weight_value;
|
|
optional<int64_t> qkv_weight_bdim;
|
|
std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
|
|
Tensor qkv_bias_value;
|
|
optional<int64_t> qkv_bias_bdim;
|
|
std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
|
|
Tensor proj_weight_value;
|
|
optional<int64_t> proj_weight_bdim;
|
|
std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
|
|
Tensor proj_bias_value;
|
|
optional<int64_t> proj_bias_bdim;
|
|
std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
|
|
optional<Tensor> mask_value;
|
|
optional<int64_t> mask_bdim;
|
|
if (mask) {
|
|
std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim, need_weights, average_attn_weights, mask_type);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor scaled_dot_product_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, c10::optional<double> scale) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) {
|
|
return at::_ops::scaled_dot_product_attention::call(query, key, value, attn_mask, dropout_p, is_causal, scale);
|
|
}
|
|
Tensor query_value;
|
|
optional<int64_t> query_bdim;
|
|
std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
|
|
Tensor key_value;
|
|
optional<int64_t> key_bdim;
|
|
std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
|
|
Tensor value_value;
|
|
optional<int64_t> value_bdim;
|
|
std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
|
|
optional<Tensor> attn_mask_value;
|
|
optional<int64_t> attn_mask_bdim;
|
|
if (attn_mask) {
|
|
std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, is_causal, scale);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_attention_math_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_mask, double dropout_p, bool is_causal, const c10::optional<at::Tensor> & dropout_mask, c10::optional<double> scale) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level) && !isBatchedAtLevel(dropout_mask, cur_level)) {
|
|
return at::_ops::_scaled_dot_product_attention_math::call(query, key, value, attn_mask, dropout_p, is_causal, dropout_mask, scale);
|
|
}
|
|
Tensor query_value;
|
|
optional<int64_t> query_bdim;
|
|
std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
|
|
Tensor key_value;
|
|
optional<int64_t> key_bdim;
|
|
std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
|
|
Tensor value_value;
|
|
optional<int64_t> value_bdim;
|
|
std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
|
|
optional<Tensor> attn_mask_value;
|
|
optional<int64_t> attn_mask_bdim;
|
|
if (attn_mask) {
|
|
std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
|
|
}
|
|
optional<Tensor> dropout_mask_value;
|
|
optional<int64_t> dropout_mask_bdim;
|
|
if (dropout_mask) {
|
|
std::tie(dropout_mask_value, dropout_mask_bdim) = unwrapTensorAtLevel(dropout_mask.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_mask_value, attn_mask_bdim, dropout_p, is_causal, dropout_mask_value, dropout_mask_bdim, scale);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_for_cpu_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, const c10::optional<at::Tensor> & attn_mask, c10::optional<double> scale) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) {
|
|
return at::_ops::_scaled_dot_product_flash_attention_for_cpu::call(query, key, value, dropout_p, is_causal, attn_mask, scale);
|
|
}
|
|
Tensor query_value;
|
|
optional<int64_t> query_bdim;
|
|
std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
|
|
Tensor key_value;
|
|
optional<int64_t> key_bdim;
|
|
std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
|
|
Tensor value_value;
|
|
optional<int64_t> value_bdim;
|
|
std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
|
|
optional<Tensor> attn_mask_value;
|
|
optional<int64_t> attn_mask_bdim;
|
|
if (attn_mask) {
|
|
std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, dropout_p, is_causal, attn_mask_value, attn_mask_bdim, scale);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional<double> scale) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) {
|
|
return at::_ops::_scaled_dot_product_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale);
|
|
}
|
|
Tensor grad_out_value;
|
|
optional<int64_t> grad_out_bdim;
|
|
std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
|
|
Tensor query_value;
|
|
optional<int64_t> query_bdim;
|
|
std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
|
|
Tensor key_value;
|
|
optional<int64_t> key_bdim;
|
|
std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
|
|
Tensor value_value;
|
|
optional<int64_t> value_bdim;
|
|
std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
|
|
Tensor out_value;
|
|
optional<int64_t> out_bdim;
|
|
std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
|
|
Tensor logsumexp_value;
|
|
optional<int64_t> logsumexp_bdim;
|
|
std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
|
|
Tensor cum_seq_q_value;
|
|
optional<int64_t> cum_seq_q_bdim;
|
|
std::tie(cum_seq_q_value, cum_seq_q_bdim) = unwrapTensorAtLevel(cum_seq_q, cur_level);
|
|
Tensor cum_seq_k_value;
|
|
optional<int64_t> cum_seq_k_bdim;
|
|
std::tie(cum_seq_k_value, cum_seq_k_bdim) = unwrapTensorAtLevel(cum_seq_k, cur_level);
|
|
Tensor philox_seed_value;
|
|
optional<int64_t> philox_seed_bdim;
|
|
std::tie(philox_seed_value, philox_seed_bdim) = unwrapTensorAtLevel(philox_seed, cur_level);
|
|
Tensor philox_offset_value;
|
|
optional<int64_t> philox_offset_bdim;
|
|
std::tie(philox_offset_value, philox_offset_bdim) = unwrapTensorAtLevel(philox_offset, cur_level);
|
|
auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, scale);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_flash_attention_for_cpu_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, double dropout_p, bool is_causal, const c10::optional<at::Tensor> & attn_mask, c10::optional<double> scale) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(attn_mask, cur_level)) {
|
|
return at::_ops::_scaled_dot_product_flash_attention_for_cpu_backward::call(grad_out, query, key, value, out, logsumexp, dropout_p, is_causal, attn_mask, scale);
|
|
}
|
|
Tensor grad_out_value;
|
|
optional<int64_t> grad_out_bdim;
|
|
std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
|
|
Tensor query_value;
|
|
optional<int64_t> query_bdim;
|
|
std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
|
|
Tensor key_value;
|
|
optional<int64_t> key_bdim;
|
|
std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
|
|
Tensor value_value;
|
|
optional<int64_t> value_bdim;
|
|
std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
|
|
Tensor out_value;
|
|
optional<int64_t> out_bdim;
|
|
std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
|
|
Tensor logsumexp_value;
|
|
optional<int64_t> logsumexp_bdim;
|
|
std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
|
|
optional<Tensor> attn_mask_value;
|
|
optional<int64_t> attn_mask_bdim;
|
|
if (attn_mask) {
|
|
std::tie(attn_mask_value, attn_mask_bdim) = unwrapTensorAtLevel(attn_mask.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, dropout_p, is_causal, attn_mask_value, attn_mask_bdim, scale);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & attn_bias, bool compute_log_sumexp, double dropout_p, bool is_causal, c10::optional<double> scale) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_bias, cur_level)) {
|
|
return at::_ops::_scaled_dot_product_efficient_attention::call(query, key, value, attn_bias, compute_log_sumexp, dropout_p, is_causal, scale);
|
|
}
|
|
Tensor query_value;
|
|
optional<int64_t> query_bdim;
|
|
std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
|
|
Tensor key_value;
|
|
optional<int64_t> key_bdim;
|
|
std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
|
|
Tensor value_value;
|
|
optional<int64_t> value_bdim;
|
|
std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
|
|
optional<Tensor> attn_bias_value;
|
|
optional<int64_t> attn_bias_bdim;
|
|
if (attn_bias) {
|
|
std::tie(attn_bias_value, attn_bias_bdim) = unwrapTensorAtLevel(attn_bias.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_bias_value, attn_bias_bdim, compute_log_sumexp, dropout_p, is_causal, scale);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
|
|
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_efficient_attention_backward_generated_plumbing(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, double dropout_p, ::std::array<bool,4> grad_input_mask, bool is_causal, c10::optional<double> scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_out_, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(attn_bias, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) {
    return at::_ops::_scaled_dot_product_efficient_attention_backward::call(grad_out_, query, key, value, attn_bias, out, logsumexp, philox_seed, philox_offset, dropout_p, grad_input_mask, is_causal, scale);
  }
  Tensor grad_out__value;
  optional<int64_t> grad_out__bdim;
  std::tie(grad_out__value, grad_out__bdim) = unwrapTensorAtLevel(grad_out_, cur_level);
  Tensor query_value;
  optional<int64_t> query_bdim;
  std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  Tensor key_value;
  optional<int64_t> key_bdim;
  std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  Tensor value_value;
  optional<int64_t> value_bdim;
  std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  Tensor attn_bias_value;
  optional<int64_t> attn_bias_bdim;
  std::tie(attn_bias_value, attn_bias_bdim) = unwrapTensorAtLevel(attn_bias, cur_level);
  Tensor out_value;
  optional<int64_t> out_bdim;
  std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
  Tensor logsumexp_value;
  optional<int64_t> logsumexp_bdim;
  std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
  Tensor philox_seed_value;
  optional<int64_t> philox_seed_bdim;
  std::tie(philox_seed_value, philox_seed_bdim) = unwrapTensorAtLevel(philox_seed, cur_level);
  Tensor philox_offset_value;
  optional<int64_t> philox_offset_bdim;
  std::tie(philox_offset_value, philox_offset_bdim) = unwrapTensorAtLevel(philox_offset, cur_level);
  auto results = batch_rule(grad_out__value, grad_out__bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, attn_bias_value, attn_bias_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, dropout_p, grad_input_mask, is_causal, scale);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional<double> scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level)) {
    return at::_ops::_scaled_dot_product_cudnn_attention::call(query, key, value, dropout_p, is_causal, return_debug_mask, scale);
  }
  Tensor query_value;
  optional<int64_t> query_bdim;
  std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  Tensor key_value;
  optional<int64_t> key_bdim;
  std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  Tensor value_value;
  optional<int64_t> value_bdim;
  std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, dropout_p, is_causal, return_debug_mask, scale);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _flash_attention_forward_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & cum_seq_q, const c10::optional<at::Tensor> & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, bool return_debug_mask, c10::optional<double> scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level)) {
    return at::_ops::_flash_attention_forward::call(query, key, value, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale);
  }
  Tensor query_value;
  optional<int64_t> query_bdim;
  std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  Tensor key_value;
  optional<int64_t> key_bdim;
  std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  Tensor value_value;
  optional<int64_t> value_bdim;
  std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  optional<Tensor> cum_seq_q_value;
  optional<int64_t> cum_seq_q_bdim;
  if (cum_seq_q) {
    std::tie(cum_seq_q_value, cum_seq_q_bdim) = unwrapTensorAtLevel(cum_seq_q.value(), cur_level);
  }
  optional<Tensor> cum_seq_k_value;
  optional<int64_t> cum_seq_k_bdim;
  if (cum_seq_k) {
    std::tie(cum_seq_k_value, cum_seq_k_bdim) = unwrapTensorAtLevel(cum_seq_k.value(), cur_level);
  }
  auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, return_debug_mask, scale);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor> _flash_attention_backward_generated_plumbing(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, c10::optional<double> scale) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_out, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(cum_seq_q, cur_level) && !isBatchedAtLevel(cum_seq_k, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) {
    return at::_ops::_flash_attention_backward::call(grad_out, query, key, value, out, logsumexp, cum_seq_q, cum_seq_k, max_q, max_k, dropout_p, is_causal, philox_seed, philox_offset, scale);
  }
  Tensor grad_out_value;
  optional<int64_t> grad_out_bdim;
  std::tie(grad_out_value, grad_out_bdim) = unwrapTensorAtLevel(grad_out, cur_level);
  Tensor query_value;
  optional<int64_t> query_bdim;
  std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  Tensor key_value;
  optional<int64_t> key_bdim;
  std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  Tensor value_value;
  optional<int64_t> value_bdim;
  std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  Tensor out_value;
  optional<int64_t> out_bdim;
  std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
  Tensor logsumexp_value;
  optional<int64_t> logsumexp_bdim;
  std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
  Tensor cum_seq_q_value;
  optional<int64_t> cum_seq_q_bdim;
  std::tie(cum_seq_q_value, cum_seq_q_bdim) = unwrapTensorAtLevel(cum_seq_q, cur_level);
  Tensor cum_seq_k_value;
  optional<int64_t> cum_seq_k_bdim;
  std::tie(cum_seq_k_value, cum_seq_k_bdim) = unwrapTensorAtLevel(cum_seq_k, cur_level);
  Tensor philox_seed_value;
  optional<int64_t> philox_seed_bdim;
  std::tie(philox_seed_value, philox_seed_bdim) = unwrapTensorAtLevel(philox_seed, cur_level);
  Tensor philox_offset_value;
  optional<int64_t> philox_offset_bdim;
  std::tie(philox_offset_value, philox_offset_bdim) = unwrapTensorAtLevel(philox_offset, cur_level);
  auto results = batch_rule(grad_out_value, grad_out_bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, out_value, out_bdim, logsumexp_value, logsumexp_bdim, cum_seq_q_value, cum_seq_q_bdim, cum_seq_k_value, cum_seq_k_bdim, max_q, max_k, dropout_p, is_causal, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, scale);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> _efficient_attention_backward_generated_plumbing(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional<at::Tensor> & bias, const at::Tensor & out, const c10::optional<at::Tensor> & cu_seqlens_q, const c10::optional<at::Tensor> & cu_seqlens_k, c10::SymInt max_seqlen_q, c10::SymInt max_seqlen_k, const at::Tensor & logsumexp, double dropout_p, const at::Tensor & philox_seed, const at::Tensor & philox_offset, int64_t custom_mask_type, bool bias_requires_grad, c10::optional<double> scale, c10::optional<int64_t> num_splits_key) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_out_, cur_level) && !isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(out, cur_level) && !isBatchedAtLevel(cu_seqlens_q, cur_level) && !isBatchedAtLevel(cu_seqlens_k, cur_level) && !isBatchedAtLevel(logsumexp, cur_level) && !isBatchedAtLevel(philox_seed, cur_level) && !isBatchedAtLevel(philox_offset, cur_level)) {
    return at::_ops::_efficient_attention_backward::call(grad_out_, query, key, value, bias, out, cu_seqlens_q, cu_seqlens_k, max_seqlen_q, max_seqlen_k, logsumexp, dropout_p, philox_seed, philox_offset, custom_mask_type, bias_requires_grad, scale, num_splits_key);
  }
  Tensor grad_out__value;
  optional<int64_t> grad_out__bdim;
  std::tie(grad_out__value, grad_out__bdim) = unwrapTensorAtLevel(grad_out_, cur_level);
  Tensor query_value;
  optional<int64_t> query_bdim;
  std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  Tensor key_value;
  optional<int64_t> key_bdim;
  std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  Tensor value_value;
  optional<int64_t> value_bdim;
  std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  Tensor out_value;
  optional<int64_t> out_bdim;
  std::tie(out_value, out_bdim) = unwrapTensorAtLevel(out, cur_level);
  Tensor logsumexp_value;
  optional<int64_t> logsumexp_bdim;
  std::tie(logsumexp_value, logsumexp_bdim) = unwrapTensorAtLevel(logsumexp, cur_level);
  Tensor philox_seed_value;
  optional<int64_t> philox_seed_bdim;
  std::tie(philox_seed_value, philox_seed_bdim) = unwrapTensorAtLevel(philox_seed, cur_level);
  Tensor philox_offset_value;
  optional<int64_t> philox_offset_bdim;
  std::tie(philox_offset_value, philox_offset_bdim) = unwrapTensorAtLevel(philox_offset, cur_level);
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  optional<Tensor> cu_seqlens_q_value;
  optional<int64_t> cu_seqlens_q_bdim;
  if (cu_seqlens_q) {
    std::tie(cu_seqlens_q_value, cu_seqlens_q_bdim) = unwrapTensorAtLevel(cu_seqlens_q.value(), cur_level);
  }
  optional<Tensor> cu_seqlens_k_value;
  optional<int64_t> cu_seqlens_k_bdim;
  if (cu_seqlens_k) {
    std::tie(cu_seqlens_k_value, cu_seqlens_k_bdim) = unwrapTensorAtLevel(cu_seqlens_k.value(), cur_level);
  }
  auto results = batch_rule(grad_out__value, grad_out__bdim, query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, bias_value, bias_bdim, out_value, out_bdim, cu_seqlens_q_value, cu_seqlens_q_bdim, cu_seqlens_k_value, cu_seqlens_k_bdim, max_seqlen_q, max_seqlen_k, logsumexp_value, logsumexp_bdim, dropout_p, philox_seed_value, philox_seed_bdim, philox_offset_value, philox_offset_bdim, custom_mask_type, bias_requires_grad, scale, num_splits_key);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _triton_scaled_dot_attention_generated_plumbing(const at::Tensor & q, const at::Tensor & k, const at::Tensor & v, double dropout_p) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(q, cur_level) && !isBatchedAtLevel(k, cur_level) && !isBatchedAtLevel(v, cur_level)) {
    return at::_ops::_triton_scaled_dot_attention::call(q, k, v, dropout_p);
  }
  Tensor q_value;
  optional<int64_t> q_bdim;
  std::tie(q_value, q_bdim) = unwrapTensorAtLevel(q, cur_level);
  Tensor k_value;
  optional<int64_t> k_bdim;
  std::tie(k_value, k_bdim) = unwrapTensorAtLevel(k, cur_level);
  Tensor v_value;
  optional<int64_t> v_bdim;
  std::tie(v_value, v_bdim) = unwrapTensorAtLevel(v, cur_level);
  auto results = batch_rule(q_value, q_bdim, k_value, k_bdim, v_value, v_bdim, dropout_p);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
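// In-place plumbing (note the "gen_vmap_inplace_plumbing" escape check below):
// the batch rule mutates the unwrapped value in place and the original `self`
// reference is returned unchanged.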
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor & _fill_mem_eff_dropout_mask__generated_plumbing(at::Tensor & self, double dropout_p, int64_t seed, int64_t offset) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_inplace_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_fill_mem_eff_dropout_mask_::call(self, dropout_p, seed, offset);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, dropout_p, seed, offset);
  return self;
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _triton_multi_head_attention_generated_plumbing(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, int64_t embed_dim, int64_t num_head, const at::Tensor & qkv_weight, const at::Tensor & qkv_bias, const at::Tensor & proj_weight, const at::Tensor & proj_bias, const c10::optional<at::Tensor> & mask) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(query, cur_level) && !isBatchedAtLevel(key, cur_level) && !isBatchedAtLevel(value, cur_level) && !isBatchedAtLevel(qkv_weight, cur_level) && !isBatchedAtLevel(qkv_bias, cur_level) && !isBatchedAtLevel(proj_weight, cur_level) && !isBatchedAtLevel(proj_bias, cur_level) && !isBatchedAtLevel(mask, cur_level)) {
    return at::_ops::_triton_multi_head_attention::call(query, key, value, embed_dim, num_head, qkv_weight, qkv_bias, proj_weight, proj_bias, mask);
  }
  Tensor query_value;
  optional<int64_t> query_bdim;
  std::tie(query_value, query_bdim) = unwrapTensorAtLevel(query, cur_level);
  Tensor key_value;
  optional<int64_t> key_bdim;
  std::tie(key_value, key_bdim) = unwrapTensorAtLevel(key, cur_level);
  Tensor value_value;
  optional<int64_t> value_bdim;
  std::tie(value_value, value_bdim) = unwrapTensorAtLevel(value, cur_level);
  Tensor qkv_weight_value;
  optional<int64_t> qkv_weight_bdim;
  std::tie(qkv_weight_value, qkv_weight_bdim) = unwrapTensorAtLevel(qkv_weight, cur_level);
  Tensor qkv_bias_value;
  optional<int64_t> qkv_bias_bdim;
  std::tie(qkv_bias_value, qkv_bias_bdim) = unwrapTensorAtLevel(qkv_bias, cur_level);
  Tensor proj_weight_value;
  optional<int64_t> proj_weight_bdim;
  std::tie(proj_weight_value, proj_weight_bdim) = unwrapTensorAtLevel(proj_weight, cur_level);
  Tensor proj_bias_value;
  optional<int64_t> proj_bias_bdim;
  std::tie(proj_bias_value, proj_bias_bdim) = unwrapTensorAtLevel(proj_bias, cur_level);
  optional<Tensor> mask_value;
  optional<int64_t> mask_bdim;
  if (mask) {
    std::tie(mask_value, mask_bdim) = unwrapTensorAtLevel(mask.value(), cur_level);
  }
  auto results = batch_rule(query_value, query_bdim, key_value, key_bdim, value_value, value_bdim, embed_dim, num_head, qkv_weight_value, qkv_weight_bdim, qkv_bias_value, qkv_bias_bdim, proj_weight_value, proj_weight_bdim, proj_bias_value, proj_bias_bdim, mask_value, mask_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
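// Plumbing for the torch.special Bessel and orthogonal-polynomial operators.
// Each polynomial op comes in three overloads -- (Tensor x, Tensor n), a
// *_x_scalar variant, and an *_n_scalar variant -- and only Tensor arguments
// are unwrapped; Scalar arguments are forwarded to the batch rule as-is.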
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_airy_ai_generated_plumbing(const at::Tensor & x) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_airy_ai::call(x);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_bessel_j0_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_bessel_j0::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
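// For orientation only: these templates are instantiated when functorch
// registers a concrete batch rule for an operator (e.g. via the VMAP_SUPPORT
// helpers in the BatchRules* sources). The names below are a hypothetical
// sketch -- `bessel_j0_batch_rule` stands for a function returning a
// (Tensor, optional<int64_t>) pair -- not the actual registration code:
//
//   TORCH_LIBRARY_IMPL(aten, FuncTorchBatched, m) {
//     m.impl("special_bessel_j0",
//            special_bessel_j0_generated_plumbing<decltype(&bessel_j0_batch_rule),
//                                                 &bessel_j0_batch_rule>);
//   }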
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_bessel_j1_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_bessel_j1::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_bessel_y0_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_bessel_y0::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_bessel_y1_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_bessel_y1::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_chebyshev_polynomial_t_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_chebyshev_polynomial_t::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_chebyshev_polynomial_t_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_chebyshev_polynomial_t_x_scalar::call(x, n);
  }
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_chebyshev_polynomial_t_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_chebyshev_polynomial_t_n_scalar::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_chebyshev_polynomial_u_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_chebyshev_polynomial_u::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_chebyshev_polynomial_u_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_chebyshev_polynomial_u_x_scalar::call(x, n);
  }
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_chebyshev_polynomial_u_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_chebyshev_polynomial_u_n_scalar::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_chebyshev_polynomial_v_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_chebyshev_polynomial_v::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_chebyshev_polynomial_v_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_chebyshev_polynomial_v_x_scalar::call(x, n);
  }
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_chebyshev_polynomial_v_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_chebyshev_polynomial_v_n_scalar::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_chebyshev_polynomial_w_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_chebyshev_polynomial_w::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_chebyshev_polynomial_w_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_chebyshev_polynomial_w_x_scalar::call(x, n);
  }
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_chebyshev_polynomial_w_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_chebyshev_polynomial_w_n_scalar::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_hermite_polynomial_h_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_hermite_polynomial_h::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_hermite_polynomial_h_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_hermite_polynomial_h_x_scalar::call(x, n);
  }
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_hermite_polynomial_h_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_hermite_polynomial_h_n_scalar::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_hermite_polynomial_he_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_hermite_polynomial_he::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_hermite_polynomial_he_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_hermite_polynomial_he_x_scalar::call(x, n);
  }
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_hermite_polynomial_he_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_hermite_polynomial_he_n_scalar::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_laguerre_polynomial_l_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_laguerre_polynomial_l::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_laguerre_polynomial_l_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_laguerre_polynomial_l_x_scalar::call(x, n);
  }
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_laguerre_polynomial_l_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_laguerre_polynomial_l_n_scalar::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_legendre_polynomial_p_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_legendre_polynomial_p::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_legendre_polynomial_p_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_legendre_polynomial_p_x_scalar::call(x, n);
  }
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_legendre_polynomial_p_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_legendre_polynomial_p_n_scalar::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_modified_bessel_i0_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_modified_bessel_i0::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_modified_bessel_i1_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_modified_bessel_i1::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_modified_bessel_k0_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_modified_bessel_k0::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_modified_bessel_k1_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::special_modified_bessel_k1::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_scaled_modified_bessel_k0_generated_plumbing(const at::Tensor & x) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_scaled_modified_bessel_k0::call(x);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_scaled_modified_bessel_k1_generated_plumbing(const at::Tensor & x) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_scaled_modified_bessel_k1::call(x);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
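// Shifted Chebyshev polynomials: same three-overload pattern (Tensor/Tensor,
// Scalar x, Scalar n) as the unshifted variants above.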
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_t_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_t::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_t_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_t_x_scalar::call(x, n);
  }
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_t_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_t_n_scalar::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_u_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_u::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_u_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_u_x_scalar::call(x, n);
  }
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_u_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_u_n_scalar::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_v_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_v::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_v_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_v_x_scalar::call(x, n);
  }
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_v_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_v_n_scalar::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_w_generated_plumbing(const at::Tensor & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level) && !isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_w::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x_value, x_bdim, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_w_x_scalar_generated_plumbing(const at::Scalar & x, const at::Tensor & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(n, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_w_x_scalar::call(x, n);
  }
  Tensor n_value;
  optional<int64_t> n_bdim;
  std::tie(n_value, n_bdim) = unwrapTensorAtLevel(n, cur_level);
  auto results = batch_rule(x, n_value, n_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_shifted_chebyshev_polynomial_w_n_scalar_generated_plumbing(const at::Tensor & x, const at::Scalar & n) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_shifted_chebyshev_polynomial_w_n_scalar::call(x, n);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim, n);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor special_spherical_bessel_j0_generated_plumbing(const at::Tensor & x) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(x, cur_level)) {
    return at::_ops::special_spherical_bessel_j0::call(x);
  }
  Tensor x_value;
  optional<int64_t> x_bdim;
  std::tie(x_value, x_bdim) = unwrapTensorAtLevel(x, cur_level);
  auto results = batch_rule(x_value, x_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
at::Tensor _foobar_generated_plumbing(const at::Tensor & self, bool arg1, bool arg2, bool arg3) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foobar::call(self, arg1, arg2, arg3);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, arg1, arg2, arg3);
|
|
return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adam__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_adam_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  }
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adam__tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_adam__tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  }
  Tensor lr_value;
  optional<int64_t> lr_bdim;
  std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level);
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adamw__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_adamw_::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  }
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adamw__tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_adamw__tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  }
  Tensor lr_value;
  optional<int64_t> lr_bdim;
  std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level);
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_sgd__generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_sgd_::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
  }
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_sgd__tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_sgd__tensor_lr::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
  }
  Tensor lr_value;
  optional<int64_t> lr_bdim;
  std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level);
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr_value, lr_bdim, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _propagate_xla_data_generated_plumbing(const at::Tensor & input, const at::Tensor & output) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(output, cur_level)) {
    return at::_ops::_propagate_xla_data::call(input, output);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor output_value;
  optional<int64_t> output_bdim;
  std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  batch_rule(input_value, input_bdim, output_value, output_bdim);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _cudnn_rnn_backward_out_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, c10::SymInt hidden_size, c10::SymInt proj_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, c10::SymIntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level) && !isBatchedAtLevel(out3, cur_level)) {
    return at::_ops::_cudnn_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_buf_value;
  optional<int64_t> weight_buf_bdim;
  std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  Tensor output_value;
  optional<int64_t> output_bdim;
  std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  Tensor reserve_value;
  optional<int64_t> reserve_bdim;
  std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level);
  Tensor out0_value;
  optional<int64_t> out0_bdim;
  std::tie(out0_value, out0_bdim) = unwrapTensorAtLevel(out0, cur_level);
  Tensor out1_value;
  optional<int64_t> out1_bdim;
  std::tie(out1_value, out1_bdim) = unwrapTensorAtLevel(out1, cur_level);
  Tensor out2_value;
  optional<int64_t> out2_bdim;
  std::tie(out2_value, out2_bdim) = unwrapTensorAtLevel(out2, cur_level);
  optional<Tensor> cx_value;
  optional<int64_t> cx_bdim;
  if (cx) {
    std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  }
  optional<Tensor> grad_output_value;
  optional<int64_t> grad_output_bdim;
  if (grad_output) {
    std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
  }
  optional<Tensor> grad_hy_value;
  optional<int64_t> grad_hy_bdim;
  if (grad_hy) {
    std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  }
  optional<Tensor> grad_cy_value;
  optional<int64_t> grad_cy_bdim;
  if (grad_cy) {
    std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  }
  optional<Tensor> dropout_state_value;
  optional<int64_t> dropout_state_bdim;
  if (dropout_state) {
    std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  }
  batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, proj_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask, out0_value, out0_bdim, out1_value, out1_bdim, out2_value, out2_bdim, out3);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor bernoulli_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(p, cur_level)) {
    return at::_ops::bernoulli_Tensor::call(self, p, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor p_value;
  optional<int64_t> p_bdim;
  std::tie(p_value, p_bdim) = unwrapTensorAtLevel(p, cur_level);
  auto results = batch_rule(self_value, self_bdim, p_value, p_bdim, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor embedding_renorm_generated_plumbing(const at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level)) {
    return at::_ops::embedding_renorm::call(self, indices, max_norm, norm_type);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor indices_value;
  optional<int64_t> indices_bdim;
  std::tie(indices_value, indices_bdim) = unwrapTensorAtLevel(indices, cur_level);
  auto results = batch_rule(self_value, self_bdim, indices_value, indices_bdim, max_norm, norm_type);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor resize_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::resize::call(self, size, memory_format);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _resize_output_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef size, at::Device device) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_resize_output::call(self, size, device);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, device);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _index_put_impl_generated_plumbing(const at::Tensor & self, const c10::List<c10::optional<at::Tensor>> & indices, const at::Tensor & values, bool accumulate, bool unsafe) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(indices, cur_level) && !isBatchedAtLevel(values, cur_level)) {
    return at::_ops::_index_put_impl::call(self, indices, values, accumulate, unsafe);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor values_value;
  optional<int64_t> values_bdim;
  std::tie(values_value, values_bdim) = unwrapTensorAtLevel(values, cur_level);
  auto results = batch_rule(self_value, self_bdim, indices, values_value, values_bdim, accumulate, unsafe);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void miopen_rnn_backward_out_generated_plumbing(const at::Tensor & input, at::TensorList weight, int64_t weight_stride0, const at::Tensor & weight_buf, const at::Tensor & hx, const c10::optional<at::Tensor> & cx, const at::Tensor & output, const c10::optional<at::Tensor> & grad_output, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, int64_t mode, int64_t hidden_size, int64_t num_layers, bool batch_first, double dropout, bool train, bool bidirectional, at::IntArrayRef batch_sizes, const c10::optional<at::Tensor> & dropout_state, const at::Tensor & reserve, ::std::array<bool,4> output_mask, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::TensorList out3) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(weight_buf, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(cx, cur_level) && !isBatchedAtLevel(output, cur_level) && !isBatchedAtLevel(grad_output, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(dropout_state, cur_level) && !isBatchedAtLevel(reserve, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level) && !isBatchedAtLevel(out3, cur_level)) {
    return at::_ops::miopen_rnn_backward_out::call(input, weight, weight_stride0, weight_buf, hx, cx, output, grad_output, grad_hy, grad_cy, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state, reserve, output_mask, out0, out1, out2, out3);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor weight_buf_value;
  optional<int64_t> weight_buf_bdim;
  std::tie(weight_buf_value, weight_buf_bdim) = unwrapTensorAtLevel(weight_buf, cur_level);
  Tensor hx_value;
  optional<int64_t> hx_bdim;
  std::tie(hx_value, hx_bdim) = unwrapTensorAtLevel(hx, cur_level);
  Tensor output_value;
  optional<int64_t> output_bdim;
  std::tie(output_value, output_bdim) = unwrapTensorAtLevel(output, cur_level);
  Tensor reserve_value;
  optional<int64_t> reserve_bdim;
  std::tie(reserve_value, reserve_bdim) = unwrapTensorAtLevel(reserve, cur_level);
  Tensor out0_value;
  optional<int64_t> out0_bdim;
  std::tie(out0_value, out0_bdim) = unwrapTensorAtLevel(out0, cur_level);
  Tensor out1_value;
  optional<int64_t> out1_bdim;
  std::tie(out1_value, out1_bdim) = unwrapTensorAtLevel(out1, cur_level);
  Tensor out2_value;
  optional<int64_t> out2_bdim;
  std::tie(out2_value, out2_bdim) = unwrapTensorAtLevel(out2, cur_level);
  optional<Tensor> cx_value;
  optional<int64_t> cx_bdim;
  if (cx) {
    std::tie(cx_value, cx_bdim) = unwrapTensorAtLevel(cx.value(), cur_level);
  }
  optional<Tensor> grad_output_value;
  optional<int64_t> grad_output_bdim;
  if (grad_output) {
    std::tie(grad_output_value, grad_output_bdim) = unwrapTensorAtLevel(grad_output.value(), cur_level);
  }
  optional<Tensor> grad_hy_value;
  optional<int64_t> grad_hy_bdim;
  if (grad_hy) {
    std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  }
  optional<Tensor> grad_cy_value;
  optional<int64_t> grad_cy_bdim;
  if (grad_cy) {
    std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  }
  optional<Tensor> dropout_state_value;
  optional<int64_t> dropout_state_bdim;
  if (dropout_state) {
    std::tie(dropout_state_value, dropout_state_bdim) = unwrapTensorAtLevel(dropout_state.value(), cur_level);
  }
  batch_rule(input_value, input_bdim, weight, weight_stride0, weight_buf_value, weight_buf_bdim, hx_value, hx_bdim, cx_value, cx_bdim, output_value, output_bdim, grad_output_value, grad_output_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, mode, hidden_size, num_layers, batch_first, dropout, train, bidirectional, batch_sizes, dropout_state_value, dropout_state_bdim, reserve_value, reserve_bdim, output_mask, out0_value, out0_bdim, out1_value, out1_bdim, out2_value, out2_bdim, out3);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _native_batch_norm_legit_functional_generated_plumbing(const at::Tensor & input, const c10::optional<at::Tensor> & weight, const c10::optional<at::Tensor> & bias, const at::Tensor & running_mean, const at::Tensor & running_var, bool training, double momentum, double eps) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(bias, cur_level) && !isBatchedAtLevel(running_mean, cur_level) && !isBatchedAtLevel(running_var, cur_level)) {
    return at::_ops::_native_batch_norm_legit_functional::call(input, weight, bias, running_mean, running_var, training, momentum, eps);
  }
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor running_mean_value;
  optional<int64_t> running_mean_bdim;
  std::tie(running_mean_value, running_mean_bdim) = unwrapTensorAtLevel(running_mean, cur_level);
  Tensor running_var_value;
  optional<int64_t> running_var_bdim;
  std::tie(running_var_value, running_var_bdim) = unwrapTensorAtLevel(running_var, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  optional<Tensor> bias_value;
  optional<int64_t> bias_bdim;
  if (bias) {
    std::tie(bias_value, bias_bdim) = unwrapTensorAtLevel(bias.value(), cur_level);
  }
  auto results = batch_rule(input_value, input_bdim, weight_value, weight_bdim, bias_value, bias_bdim, running_mean_value, running_mean_bdim, running_var_value, running_var_bdim, training, momentum, eps);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void unsafe_split_Tensor_out_generated_plumbing(const at::Tensor & self, c10::SymInt split_size, int64_t dim, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::unsafe_split_Tensor_out::call(self, split_size, dim, out);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, split_size, dim, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void unsafe_split_with_sizes_out_generated_plumbing(const at::Tensor & self, c10::SymIntArrayRef split_sizes, int64_t dim, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::unsafe_split_with_sizes_out::call(self, split_sizes, dim, out);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  batch_rule(self_value, self_bdim, split_sizes, dim, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor resize_as_generated_plumbing(const at::Tensor & self, const at::Tensor & the_template, c10::optional<at::MemoryFormat> memory_format) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
    return at::_ops::resize_as::call(self, the_template, memory_format);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor the_template_value;
  optional<int64_t> the_template_bdim;
  std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level);
  auto results = batch_rule(self_value, self_bdim, the_template_value, the_template_bdim, memory_format);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor resize_as_sparse_generated_plumbing(const at::Tensor & self, const at::Tensor & the_template) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(the_template, cur_level)) {
    return at::_ops::resize_as_sparse::call(self, the_template);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor the_template_value;
  optional<int64_t> the_template_bdim;
  std::tie(the_template_value, the_template_bdim) = unwrapTensorAtLevel(the_template, cur_level);
  auto results = batch_rule(self_value, self_bdim, the_template_value, the_template_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor zero_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::zero::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_resize_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sparse_resize::call(self, size, sparse_dim, dense_dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor sparse_resize_and_clear_generated_plumbing(const at::Tensor & self, at::IntArrayRef size, int64_t sparse_dim, int64_t dense_dim) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::sparse_resize_and_clear::call(self, size, sparse_dim, dense_dim);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, size, sparse_dim, dense_dim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor _coalesced_generated_plumbing(const at::Tensor & self, bool coalesced) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::_coalesced::call(self, coalesced);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, coalesced);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor copy_sparse_to_sparse_generated_plumbing(const at::Tensor & self, const at::Tensor & src, bool non_blocking) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
    return at::_ops::copy_sparse_to_sparse::call(self, src, non_blocking);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor src_value;
  optional<int64_t> src_bdim;
  std::tie(src_value, src_bdim) = unwrapTensorAtLevel(src, cur_level);
  auto results = batch_rule(self_value, self_bdim, src_value, src_bdim, non_blocking);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void quantize_per_tensor_tensors_out_generated_plumbing(at::TensorList tensors, const at::Tensor & scales, const at::Tensor & zero_points, at::ScalarType dtype, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(scales, cur_level) && !isBatchedAtLevel(zero_points, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::quantize_per_tensor_tensors_out::call(tensors, scales, zero_points, dtype, out);
  }
  Tensor scales_value;
  optional<int64_t> scales_bdim;
  std::tie(scales_value, scales_bdim) = unwrapTensorAtLevel(scales, cur_level);
  Tensor zero_points_value;
  optional<int64_t> zero_points_bdim;
  std::tie(zero_points_value, zero_points_bdim) = unwrapTensorAtLevel(zero_points, cur_level);
  batch_rule(tensors, scales_value, scales_bdim, zero_points_value, zero_points_bdim, dtype, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void dequantize_tensors_out_generated_plumbing(at::TensorList tensors, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(tensors, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::dequantize_tensors_out::call(tensors, out);
  }

  batch_rule(tensors, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor,at::Tensor> _fused_moving_avg_obs_fq_helper_functional_generated_plumbing(const at::Tensor & self, const at::Tensor & observer_on, const at::Tensor & fake_quant_on, const at::Tensor & running_min, const at::Tensor & running_max, const at::Tensor & scale, const at::Tensor & zero_point, double averaging_const, int64_t quant_min, int64_t quant_max, int64_t ch_axis, bool per_row_fake_quant, bool symmetric_quant) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(observer_on, cur_level) && !isBatchedAtLevel(fake_quant_on, cur_level) && !isBatchedAtLevel(running_min, cur_level) && !isBatchedAtLevel(running_max, cur_level) && !isBatchedAtLevel(scale, cur_level) && !isBatchedAtLevel(zero_point, cur_level)) {
    return at::_ops::_fused_moving_avg_obs_fq_helper_functional::call(self, observer_on, fake_quant_on, running_min, running_max, scale, zero_point, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor observer_on_value;
  optional<int64_t> observer_on_bdim;
  std::tie(observer_on_value, observer_on_bdim) = unwrapTensorAtLevel(observer_on, cur_level);
  Tensor fake_quant_on_value;
  optional<int64_t> fake_quant_on_bdim;
  std::tie(fake_quant_on_value, fake_quant_on_bdim) = unwrapTensorAtLevel(fake_quant_on, cur_level);
  Tensor running_min_value;
  optional<int64_t> running_min_bdim;
  std::tie(running_min_value, running_min_bdim) = unwrapTensorAtLevel(running_min, cur_level);
  Tensor running_max_value;
  optional<int64_t> running_max_bdim;
  std::tie(running_max_value, running_max_bdim) = unwrapTensorAtLevel(running_max, cur_level);
  Tensor scale_value;
  optional<int64_t> scale_bdim;
  std::tie(scale_value, scale_bdim) = unwrapTensorAtLevel(scale, cur_level);
  Tensor zero_point_value;
  optional<int64_t> zero_point_bdim;
  std::tie(zero_point_value, zero_point_bdim) = unwrapTensorAtLevel(zero_point, cur_level);
  auto results = batch_rule(self_value, self_bdim, observer_on_value, observer_on_bdim, fake_quant_on_value, fake_quant_on_bdim, running_min_value, running_min_bdim, running_max_value, running_max_bdim, scale_value, scale_bdim, zero_point_value, zero_point_bdim, averaging_const, quant_min, quant_max, ch_axis, per_row_fake_quant, symmetric_quant);
  return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level), makeBatched(std::get<4>(results), std::get<5>(results), cur_level), makeBatched(std::get<6>(results), std::get<7>(results), cur_level), makeBatched(std::get<8>(results), std::get<9>(results), cur_level), makeBatched(std::get<10>(results), std::get<11>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void lstm_mps_backward_out_generated_plumbing(const c10::optional<at::Tensor> & grad_y, const c10::optional<at::Tensor> & grad_hy, const c10::optional<at::Tensor> & grad_cy, const at::Tensor & z_state, const at::Tensor & cell_state_fwd, const at::Tensor & input, const at::Tensor & layersOutputs, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::TensorList out1, at::TensorList out2) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(grad_y, cur_level) && !isBatchedAtLevel(grad_hy, cur_level) && !isBatchedAtLevel(grad_cy, cur_level) && !isBatchedAtLevel(z_state, cur_level) && !isBatchedAtLevel(cell_state_fwd, cur_level) && !isBatchedAtLevel(input, cur_level) && !isBatchedAtLevel(layersOutputs, cur_level) && !isBatchedAtLevel(hx, cur_level) && !isBatchedAtLevel(params, cur_level) && !isBatchedAtLevel(out0, cur_level) && !isBatchedAtLevel(out1, cur_level) && !isBatchedAtLevel(out2, cur_level)) {
    return at::_ops::lstm_mps_backward_out::call(grad_y, grad_hy, grad_cy, z_state, cell_state_fwd, input, layersOutputs, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2);
  }
  Tensor z_state_value;
  optional<int64_t> z_state_bdim;
  std::tie(z_state_value, z_state_bdim) = unwrapTensorAtLevel(z_state, cur_level);
  Tensor cell_state_fwd_value;
  optional<int64_t> cell_state_fwd_bdim;
  std::tie(cell_state_fwd_value, cell_state_fwd_bdim) = unwrapTensorAtLevel(cell_state_fwd, cur_level);
  Tensor input_value;
  optional<int64_t> input_bdim;
  std::tie(input_value, input_bdim) = unwrapTensorAtLevel(input, cur_level);
  Tensor layersOutputs_value;
  optional<int64_t> layersOutputs_bdim;
  std::tie(layersOutputs_value, layersOutputs_bdim) = unwrapTensorAtLevel(layersOutputs, cur_level);
  Tensor out0_value;
  optional<int64_t> out0_bdim;
  std::tie(out0_value, out0_bdim) = unwrapTensorAtLevel(out0, cur_level);
  optional<Tensor> grad_y_value;
  optional<int64_t> grad_y_bdim;
  if (grad_y) {
    std::tie(grad_y_value, grad_y_bdim) = unwrapTensorAtLevel(grad_y.value(), cur_level);
  }
  optional<Tensor> grad_hy_value;
  optional<int64_t> grad_hy_bdim;
  if (grad_hy) {
    std::tie(grad_hy_value, grad_hy_bdim) = unwrapTensorAtLevel(grad_hy.value(), cur_level);
  }
  optional<Tensor> grad_cy_value;
  optional<int64_t> grad_cy_bdim;
  if (grad_cy) {
    std::tie(grad_cy_value, grad_cy_bdim) = unwrapTensorAtLevel(grad_cy.value(), cur_level);
  }
  batch_rule(grad_y_value, grad_y_bdim, grad_hy_value, grad_hy_bdim, grad_cy_value, grad_cy_bdim, z_state_value, z_state_bdim, cell_state_fwd_value, cell_state_fwd_bdim, input_value, input_bdim, layersOutputs_value, layersOutputs_bdim, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0_value, out0_bdim, out1, out2);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor set_source_Storage_generated_plumbing(const at::Tensor & self, at::Storage source) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::set_source_Storage::call(self, source);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, source);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor set_source_Storage_storage_offset_generated_plumbing(const at::Tensor & self, at::Storage source, c10::SymInt storage_offset, c10::SymIntArrayRef size, c10::SymIntArrayRef stride) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::set_source_Storage_storage_offset::call(self, source, storage_offset, size, stride);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, source, storage_offset, size, stride);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor set_source_Tensor_generated_plumbing(const at::Tensor & self, const at::Tensor & source) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(source, cur_level)) {
    return at::_ops::set_source_Tensor::call(self, source);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  Tensor source_value;
  optional<int64_t> source_bdim;
  std::tie(source_value, source_bdim) = unwrapTensorAtLevel(source, cur_level);
  auto results = batch_rule(self_value, self_bdim, source_value, source_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor set_generated_plumbing(const at::Tensor & self) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::set::call(self);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor random_from_generated_plumbing(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::random_from::call(self, from, to, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, from, to, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor random_to_generated_plumbing(const at::Tensor & self, int64_t to, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::random_to::call(self, to, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, to, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor random_generated_plumbing(const at::Tensor & self, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::random::call(self, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor uniform_generated_plumbing(const at::Tensor & self, double from, double to, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::uniform::call(self, from, to, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, from, to, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor cauchy_generated_plumbing(const at::Tensor & self, double median, double sigma, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::cauchy::call(self, median, sigma, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, median, sigma, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor log_normal_generated_plumbing(const at::Tensor & self, double mean, double std, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::log_normal::call(self, mean, std, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, mean, std, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor exponential_generated_plumbing(const at::Tensor & self, double lambd, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::exponential::call(self, lambd, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, lambd, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
at::Tensor geometric_generated_plumbing(const at::Tensor & self, double p, c10::optional<at::Generator> generator) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level)) {
    return at::_ops::geometric::call(self, p, generator);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  auto results = batch_rule(self_value, self_bdim, p, generator);
  return makeBatched(std::get<0>(results), std::get<1>(results), cur_level);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _histogramdd_bin_edges_out_generated_plumbing(const at::Tensor & self, at::IntArrayRef bins, c10::optional<at::ArrayRef<double>> range, const c10::optional<at::Tensor> & weight, bool density, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(weight, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_histogramdd_bin_edges_out::call(self, bins, range, weight, density, out);
  }
  Tensor self_value;
  optional<int64_t> self_bdim;
  std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
  optional<Tensor> weight_value;
  optional<int64_t> weight_bdim;
  if (weight) {
    std::tie(weight_value, weight_bdim) = unwrapTensorAtLevel(weight.value(), cur_level);
  }
  batch_rule(self_value, self_bdim, bins, range, weight_value, weight_bdim, density, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _amp_foreach_non_finite_check_and_unscale_out_generated_plumbing(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_amp_foreach_non_finite_check_and_unscale_out::call(self, found_inf, inv_scale, out);
  }
  Tensor found_inf_value;
  optional<int64_t> found_inf_bdim;
  std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level);
  Tensor inv_scale_value;
  optional<int64_t> inv_scale_bdim;
  std::tie(inv_scale_value, inv_scale_bdim) = unwrapTensorAtLevel(inv_scale, cur_level);
  batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale_generated_plumbing(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(inv_scale, cur_level)) {
|
|
return at::_ops::_amp_foreach_non_finite_check_and_unscale::call(self, found_inf, inv_scale);
|
|
}
|
|
Tensor found_inf_value;
|
|
optional<int64_t> found_inf_bdim;
|
|
std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level);
|
|
Tensor inv_scale_value;
|
|
optional<int64_t> inv_scale_bdim;
|
|
std::tie(inv_scale_value, inv_scale_bdim) = unwrapTensorAtLevel(inv_scale, cur_level);
|
|
auto results = batch_rule(self, found_inf_value, found_inf_bdim, inv_scale_value, inv_scale_bdim);
|
|
return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<at::Tensor,at::Tensor> _amp_update_scale_generated_plumbing(const at::Tensor & self, const at::Tensor & growth_tracker, const at::Tensor & found_inf, double scale_growth_factor, double scale_backoff_factor, int64_t growth_interval) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(growth_tracker, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
|
|
return at::_ops::_amp_update_scale::call(self, growth_tracker, found_inf, scale_growth_factor, scale_backoff_factor, growth_interval);
|
|
}
|
|
Tensor self_value;
|
|
optional<int64_t> self_bdim;
|
|
std::tie(self_value, self_bdim) = unwrapTensorAtLevel(self, cur_level);
|
|
Tensor growth_tracker_value;
|
|
optional<int64_t> growth_tracker_bdim;
|
|
std::tie(growth_tracker_value, growth_tracker_bdim) = unwrapTensorAtLevel(growth_tracker, cur_level);
|
|
Tensor found_inf_value;
|
|
optional<int64_t> found_inf_bdim;
|
|
std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf, cur_level);
|
|
auto results = batch_rule(self_value, self_bdim, growth_tracker_value, growth_tracker_bdim, found_inf_value, found_inf_bdim, scale_growth_factor, scale_backoff_factor, growth_interval);
|
|
return std::make_tuple(makeBatched(std::get<0>(results), std::get<1>(results), cur_level), makeBatched(std::get<2>(results), std::get<3>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_add_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_add_Scalar_out::call(self, scalar, out);
|
|
}
|
|
|
|
batch_rule(self, scalar, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_add_List_out_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_add_List_out::call(self, other, alpha, out);
|
|
}
|
|
|
|
batch_rule(self, other, alpha, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_add_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_add_ScalarList_out::call(self, scalars, out);
|
|
}
|
|
|
|
batch_rule(self, scalars, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_add_Tensor_out_generated_plumbing(at::TensorList self, const at::Tensor & other, const at::Scalar & alpha, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_add_Tensor_out::call(self, other, alpha, out);
|
|
}
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self, other_value, other_bdim, alpha, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_sub_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_sub_Scalar_out::call(self, scalar, out);
|
|
}
|
|
|
|
batch_rule(self, scalar, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_sub_List_out_generated_plumbing(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_sub_List_out::call(self, other, alpha, out);
|
|
}
|
|
|
|
batch_rule(self, other, alpha, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_sub_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_sub_ScalarList_out::call(self, scalars, out);
|
|
}
|
|
|
|
batch_rule(self, scalars, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_mul_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_mul_Scalar_out::call(self, scalar, out);
|
|
}
|
|
|
|
batch_rule(self, scalar, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_mul_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_mul_List_out::call(self, other, out);
|
|
}
|
|
|
|
batch_rule(self, other, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_mul_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_mul_ScalarList_out::call(self, scalars, out);
|
|
}
|
|
|
|
batch_rule(self, scalars, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_mul_Tensor_out_generated_plumbing(at::TensorList self, const at::Tensor & other, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_mul_Tensor_out::call(self, other, out);
|
|
}
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self, other_value, other_bdim, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_div_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_div_Scalar_out::call(self, scalar, out);
|
|
}
|
|
|
|
batch_rule(self, scalar, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_div_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_div_List_out::call(self, other, out);
|
|
}
|
|
|
|
batch_rule(self, other, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_div_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_div_ScalarList_out::call(self, scalars, out);
|
|
}
|
|
|
|
batch_rule(self, scalars, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_div_Tensor_out_generated_plumbing(at::TensorList self, const at::Tensor & other, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_div_Tensor_out::call(self, other, out);
|
|
}
|
|
Tensor other_value;
|
|
optional<int64_t> other_bdim;
|
|
std::tie(other_value, other_bdim) = unwrapTensorAtLevel(other, cur_level);
|
|
batch_rule(self, other_value, other_bdim, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_clamp_max_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_clamp_max_Scalar_out::call(self, scalar, out);
|
|
}
|
|
|
|
batch_rule(self, scalar, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_clamp_max_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_clamp_max_List_out::call(self, other, out);
|
|
}
|
|
|
|
batch_rule(self, other, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_clamp_max_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_clamp_max_ScalarList_out::call(self, scalars, out);
|
|
}
|
|
|
|
batch_rule(self, scalars, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_clamp_min_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_clamp_min_Scalar_out::call(self, scalar, out);
|
|
}
|
|
|
|
batch_rule(self, scalar, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_clamp_min_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_clamp_min_List_out::call(self, other, out);
|
|
}
|
|
|
|
batch_rule(self, other, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_clamp_min_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_clamp_min_ScalarList_out::call(self, scalars, out);
|
|
}
|
|
|
|
batch_rule(self, scalars, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_maximum_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_maximum_Scalar_out::call(self, scalar, out);
|
|
}
|
|
|
|
batch_rule(self, scalar, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_maximum_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_maximum_List_out::call(self, other, out);
|
|
}
|
|
|
|
batch_rule(self, other, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_maximum_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_maximum_ScalarList_out::call(self, scalars, out);
|
|
}
|
|
|
|
batch_rule(self, scalars, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_minimum_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_minimum_Scalar_out::call(self, scalar, out);
|
|
}
|
|
|
|
batch_rule(self, scalar, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_minimum_List_out_generated_plumbing(at::TensorList self, at::TensorList other, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(other, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_minimum_List_out::call(self, other, out);
|
|
}
|
|
|
|
batch_rule(self, other, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_minimum_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_minimum_ScalarList_out::call(self, scalars, out);
|
|
}
|
|
|
|
batch_rule(self, scalars, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_addcdiv_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_addcdiv_Scalar_out::call(self, tensor1, tensor2, value, out);
|
|
}
|
|
|
|
batch_rule(self, tensor1, tensor2, value, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_addcdiv_ScalarList_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_addcdiv_ScalarList_out::call(self, tensor1, tensor2, scalars, out);
|
|
}
|
|
|
|
batch_rule(self, tensor1, tensor2, scalars, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_addcdiv_Tensor_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_addcdiv_Tensor_out::call(self, tensor1, tensor2, scalars, out);
|
|
}
|
|
Tensor scalars_value;
|
|
optional<int64_t> scalars_bdim;
|
|
std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
|
|
batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_addcmul_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Scalar & value, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_addcmul_Scalar_out::call(self, tensor1, tensor2, value, out);
|
|
}
|
|
|
|
batch_rule(self, tensor1, tensor2, value, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_addcmul_ScalarList_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_addcmul_ScalarList_out::call(self, tensor1, tensor2, scalars, out);
|
|
}
|
|
|
|
batch_rule(self, tensor1, tensor2, scalars, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_addcmul_Tensor_out_generated_plumbing(at::TensorList self, at::TensorList tensor1, at::TensorList tensor2, const at::Tensor & scalars, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensor1, cur_level) && !isBatchedAtLevel(tensor2, cur_level) && !isBatchedAtLevel(scalars, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_addcmul_Tensor_out::call(self, tensor1, tensor2, scalars, out);
|
|
}
|
|
Tensor scalars_value;
|
|
optional<int64_t> scalars_bdim;
|
|
std::tie(scalars_value, scalars_bdim) = unwrapTensorAtLevel(scalars, cur_level);
|
|
batch_rule(self, tensor1, tensor2, scalars_value, scalars_bdim, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_abs_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_abs_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_acos_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_acos_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_asin_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_asin_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_atan_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_atan_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_ceil_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_ceil_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_cos_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_cos_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_cosh_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_cosh_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_erf_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_erf_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_erfc_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_erfc_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_exp_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_exp_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_expm1_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_expm1_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_floor_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_floor_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_frac_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_frac_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_lerp_List_out_generated_plumbing(at::TensorList self, at::TensorList tensors1, at::TensorList weights, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(weights, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_lerp_List_out::call(self, tensors1, weights, out);
|
|
}
|
|
|
|
batch_rule(self, tensors1, weights, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_lerp_Scalar_out_generated_plumbing(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(tensors1, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_lerp_Scalar_out::call(self, tensors1, weight, out);
|
|
}
|
|
|
|
batch_rule(self, tensors1, weight, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_lgamma_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_lgamma_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_log_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_log_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_log10_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_log10_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_log1p_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_log1p_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_log2_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_log2_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_neg_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_neg_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_norm_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & ord, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_norm_Scalar_out::call(self, ord, out);
|
|
}
|
|
|
|
batch_rule(self, ord, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_pow_List_out_generated_plumbing(at::TensorList self, at::TensorList exponent, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(exponent, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_pow_List_out::call(self, exponent, out);
|
|
}
|
|
|
|
batch_rule(self, exponent, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_pow_Scalar_out_generated_plumbing(at::TensorList self, const at::Scalar & exponent, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_pow_Scalar_out::call(self, exponent, out);
|
|
}
|
|
|
|
batch_rule(self, exponent, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_pow_ScalarList_out_generated_plumbing(at::TensorList self, at::ArrayRef<at::Scalar> exponent, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_pow_ScalarList_out::call(self, exponent, out);
|
|
}
|
|
|
|
batch_rule(self, exponent, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_reciprocal_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_reciprocal_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_round_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_round_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_sigmoid_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_sigmoid_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_sign_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_sign_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_sin_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_sin_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_sinh_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_sinh_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_sqrt_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_sqrt_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_tan_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_tan_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_tanh_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_tanh_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_trunc_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_trunc_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_zero_out_generated_plumbing(at::TensorList self, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_zero_out::call(self, out);
|
|
}
|
|
|
|
batch_rule(self, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_zero_generated_plumbing(at::TensorList self) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level)) {
|
|
return at::_ops::_foreach_zero::call(self);
|
|
}
|
|
|
|
auto results = batch_rule(self);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _foreach_copy_out_generated_plumbing(at::TensorList self, at::TensorList src, bool non_blocking, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_foreach_copy_out::call(self, src, non_blocking, out);
|
|
}
|
|
|
|
batch_rule(self, src, non_blocking, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::vector<at::Tensor> _foreach_copy_generated_plumbing(at::TensorList self, at::TensorList src, bool non_blocking) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(src, cur_level)) {
|
|
return at::_ops::_foreach_copy::call(self, src, non_blocking);
|
|
}
|
|
|
|
auto results = batch_rule(self, src, non_blocking);
|
|
return makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
void _fused_adam_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
|
|
return at::_ops::_fused_adam_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
|
|
}
|
|
optional<Tensor> grad_scale_value;
|
|
optional<int64_t> grad_scale_bdim;
|
|
if (grad_scale) {
|
|
std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
|
|
}
|
|
optional<Tensor> found_inf_value;
|
|
optional<int64_t> found_inf_bdim;
|
|
if (found_inf) {
|
|
std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
|
|
}
|
|
batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
|
|
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
|
|
c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
|
|
auto maybe_layer = maybeCurrentDynamicLayer();
|
|
vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
|
|
int64_t cur_level = maybe_layer->layerId();
|
|
if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
|
|
return at::_ops::_fused_adam::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
|
|
}
|
|
optional<Tensor> grad_scale_value;
|
|
optional<int64_t> grad_scale_bdim;
|
|
if (grad_scale) {
|
|
std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
|
|
}
|
|
optional<Tensor> found_inf_value;
|
|
optional<int64_t> found_inf_bdim;
|
|
if (found_inf) {
|
|
std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
|
|
}
|
|
auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
|
|
return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level));
|
|
}
|
|
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adam_tensor_lr_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_fused_adam_tensor_lr_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
  }
  Tensor lr_value;
  optional<int64_t> lr_bdim;
  std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level);
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adam_tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_adam_tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  }
  Tensor lr_value;
  optional<int64_t> lr_bdim;
  std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level);
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
  return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adamw_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_fused_adamw_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
  }
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adamw_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_adamw::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  }
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
  return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_adamw_tensor_lr_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_fused_adamw_tensor_lr_out::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf, out);
  }
  Tensor lr_value;
  optional<int64_t> lr_bdim;
  std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level);
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_adamw_tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(exp_avgs, cur_level) && !isBatchedAtLevel(exp_avg_sqs, cur_level) && !isBatchedAtLevel(max_exp_avg_sqs, cur_level) && !isBatchedAtLevel(state_steps, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_adamw_tensor_lr::call(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale, found_inf);
  }
  Tensor lr_value;
  optional<int64_t> lr_bdim;
  std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level);
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  auto results = batch_rule(self, grads, exp_avgs, exp_avg_sqs, max_exp_avg_sqs, state_steps, lr_value, lr_bdim, beta1, beta2, weight_decay, eps, amsgrad, maximize, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
  return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level), makeBatchedVector(std::get<6>(results), std::get<7>(results), cur_level), makeBatchedVector(std::get<8>(results), std::get<9>(results), cur_level));
}
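// The _fused_sgd wrappers below follow the same plumbing pattern; they differ
// only in their argument list (momentum_buffer_list plus the SGD
// hyperparameters weight_decay, momentum, lr, dampening, nesterov, maximize,
// is_first_step) and in returning a 3-tuple of tensor vectors rather than a
// 5-tuple.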
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_sgd_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_fused_sgd_out::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out);
  }
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, double lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_sgd::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
  }
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  auto results = batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
  return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level));
}
template <typename batch_rule_t, batch_rule_t batch_rule>
void _fused_sgd_tensor_lr_out_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf, at::TensorList out) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing_no_returns");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level) && !isBatchedAtLevel(out, cur_level)) {
    return at::_ops::_fused_sgd_tensor_lr_out::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf, out);
  }
  Tensor lr_value;
  optional<int64_t> lr_bdim;
  std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level);
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr_value, lr_bdim, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim, out);
}
template <typename batch_rule_t, batch_rule_t batch_rule>
::std::tuple<::std::vector<at::Tensor>,::std::vector<at::Tensor>,::std::vector<at::Tensor>> _fused_sgd_tensor_lr_generated_plumbing(at::TensorList self, at::TensorList grads, at::TensorList momentum_buffer_list, double weight_decay, double momentum, const at::Tensor & lr, double dampening, bool nesterov, bool maximize, bool is_first_step, const c10::optional<at::Tensor> & grad_scale, const c10::optional<at::Tensor> & found_inf) {
  c10::impl::ExcludeDispatchKeyGuard guard(DispatchKey::FuncTorchBatched);
  auto maybe_layer = maybeCurrentDynamicLayer();
  vmap_check_escaped(maybe_layer, "gen_vmap_plumbing");
  int64_t cur_level = maybe_layer->layerId();
  if (!isBatchedAtLevel(self, cur_level) && !isBatchedAtLevel(grads, cur_level) && !isBatchedAtLevel(momentum_buffer_list, cur_level) && !isBatchedAtLevel(lr, cur_level) && !isBatchedAtLevel(grad_scale, cur_level) && !isBatchedAtLevel(found_inf, cur_level)) {
    return at::_ops::_fused_sgd_tensor_lr::call(self, grads, momentum_buffer_list, weight_decay, momentum, lr, dampening, nesterov, maximize, is_first_step, grad_scale, found_inf);
  }
  Tensor lr_value;
  optional<int64_t> lr_bdim;
  std::tie(lr_value, lr_bdim) = unwrapTensorAtLevel(lr, cur_level);
  optional<Tensor> grad_scale_value;
  optional<int64_t> grad_scale_bdim;
  if (grad_scale) {
    std::tie(grad_scale_value, grad_scale_bdim) = unwrapTensorAtLevel(grad_scale.value(), cur_level);
  }
  optional<Tensor> found_inf_value;
  optional<int64_t> found_inf_bdim;
  if (found_inf) {
    std::tie(found_inf_value, found_inf_bdim) = unwrapTensorAtLevel(found_inf.value(), cur_level);
  }
  auto results = batch_rule(self, grads, momentum_buffer_list, weight_decay, momentum, lr_value, lr_bdim, dampening, nesterov, maximize, is_first_step, grad_scale_value, grad_scale_bdim, found_inf_value, found_inf_bdim);
  return std::make_tuple(makeBatchedVector(std::get<0>(results), std::get<1>(results), cur_level), makeBatchedVector(std::get<2>(results), std::get<3>(results), cur_level), makeBatchedVector(std::get<4>(results), std::get<5>(results), cur_level));
}

}} // namespace at::functorch