diff --git a/source/python.js b/source/python.js
index 95b7696a5e..fb8c3d21ec 100644
--- a/source/python.js
+++ b/source/python.js
@@ -6168,6 +6168,9 @@ python.Execution = class {
             kind() {
                 return this._kind;
             }
+            annotation_str() {
+                return this._annotation_str;
+            }
             equals(/* rhs */) {
                 throw new python.Error(`Not implemented '${this.kind()}'.`);
             }
diff --git a/source/pytorch-metadata.json b/source/pytorch-metadata.json
index 26e5d7f76a..e94bd65e27 100755
--- a/source/pytorch-metadata.json
+++ b/source/pytorch-metadata.json
@@ -32,20009 +32,6444 @@
     ]
   },
   {
-    "name": "_caffe2::BBoxTransform(Tensor rois, Tensor deltas, Tensor im_info, float[] weights, bool apply_scale, bool rotated, bool angle_bound_on, int angle_bound_lo, int angle_bound_hi, float clip_angle_thresh, bool legacy_plus_one) -> (Tensor output_0, Tensor output_1)",
-    "inputs": [
-      { "name": "rois", "type": "Tensor" },
-      { "name": "deltas", "type": "Tensor" },
-      { "name": "im_info", "type": "Tensor" },
-      { "name": "weights", "type": "float32[]" },
-      { "name": "apply_scale", "type": "boolean" },
-      { "name": "rotated", "type": "boolean" },
-      { "name": "angle_bound_on", "type": "boolean" },
-      { "name": "angle_bound_lo", "type": "int64" },
-      { "name": "angle_bound_hi", "type": "int64" },
-      { "name": "clip_angle_thresh", "type": "float32" },
-      { "name": "legacy_plus_one", "type": "boolean" }
-    ],
-    "outputs": [
-      { "name": "output_0", "type": "Tensor" },
-      { "name": "output_1", "type": "Tensor" }
-    ]
+    "name": "_caffe2::BBoxTransform(Tensor rois, Tensor deltas, Tensor im_info, float[] weights, bool apply_scale, bool rotated, bool angle_bound_on, int angle_bound_lo, int angle_bound_hi, float clip_angle_thresh, bool legacy_plus_one) -> (Tensor output_0, Tensor output_1)"
   },
   {
-    "name": "_caffe2::BatchPermutation(Tensor X, Tensor indices) -> Tensor",
-    "inputs": [
-      { "name": "X", "type": "Tensor" },
-      { "name": "indices", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "_caffe2::BatchPermutation(Tensor X, Tensor indices) -> Tensor"
   },
   {
-    "name": "_caffe2::BoxWithNMSLimit(Tensor scores, Tensor boxes, Tensor batch_splits, float score_thresh, float nms, int detections_per_im, bool soft_nms_enabled, str soft_nms_method, float soft_nms_sigma, float soft_nms_min_score_thres, bool rotated, bool cls_agnostic_bbox_reg, bool input_boxes_include_bg_cls, bool output_classes_include_bg_cls, bool legacy_plus_one) -> (Tensor scores, Tensor boxes, Tensor classes, Tensor batch_splits, Tensor keeps, Tensor keeps_size)",
-    "inputs": [
-      { "name": "scores", "type": "Tensor" },
-      { "name": "boxes", "type": "Tensor" },
-      { "name": "batch_splits", "type": "Tensor" },
-      { "name": "score_thresh", "type": "float32" },
-      { "name": "nms", "type": "float32" },
-      { "name": "detections_per_im", "type": "int64" },
-      { "name": "soft_nms_enabled", "type": "boolean" },
-      { "name": "soft_nms_method", "type": "string" },
-      { "name": "soft_nms_sigma", "type": "float32" },
-      { "name": "soft_nms_min_score_thres", "type": "float32" },
-      { "name": "rotated", "type": "boolean" },
-      { "name": "cls_agnostic_bbox_reg", "type": "boolean" },
-      { "name": "input_boxes_include_bg_cls", "type": "boolean" },
-      { "name": "output_classes_include_bg_cls", "type": "boolean" },
-      { "name": "legacy_plus_one", "type": "boolean" }
-    ],
-    "outputs": [
-      { "name": "scores", "type": "Tensor" },
-      { "name": "boxes", "type": "Tensor" },
-      { "name": "classes", "type": "Tensor" },
-      { "name": "batch_splits", "type": "Tensor" },
-      { "name": "keeps", "type": "Tensor" },
-      { "name": "keeps_size", "type": "Tensor" }
-    ]
+    "name": "_caffe2::BoxWithNMSLimit(Tensor scores, Tensor boxes, Tensor batch_splits, float score_thresh, float nms, int detections_per_im, bool soft_nms_enabled, str soft_nms_method, float soft_nms_sigma, float soft_nms_min_score_thres, bool rotated, bool cls_agnostic_bbox_reg, bool input_boxes_include_bg_cls, bool output_classes_include_bg_cls, bool legacy_plus_one) -> (Tensor scores, Tensor boxes, Tensor classes, Tensor batch_splits, Tensor keeps, Tensor keeps_size)"
   },
   {
-    "name": "_caffe2::CollectRpnProposals(Tensor[] input_list, int rpn_max_level, int rpn_min_level, int rpn_post_nms_topN) -> (Tensor rois)",
-    "inputs": [
-      { "name": "input_list", "type": "Tensor[]" },
-      { "name": "rpn_max_level", "type": "int64" },
-      { "name": "rpn_min_level", "type": "int64" },
-      { "name": "rpn_post_nms_topN", "type": "int64" }
-    ],
-    "outputs": [
-      { "name": "rois", "type": "Tensor" }
-    ]
+    "name": "_caffe2::CollectRpnProposals(Tensor[] input_list, int rpn_max_level, int rpn_min_level, int rpn_post_nms_topN) -> (Tensor rois)"
   },
   {
-    "name": "_caffe2::CopyCPUToGPU(Tensor input) -> Tensor",
-    "inputs": [
-      { "name": "input", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "_caffe2::CopyCPUToGPU(Tensor input) -> Tensor"
   },
   {
-    "name": "_caffe2::CopyGPUToCPU(Tensor input) -> Tensor",
-    "inputs": [
-      { "name": "input", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "_caffe2::CopyGPUToCPU(Tensor input) -> Tensor"
   },
   {
-    "name": "_caffe2::DistributeFpnProposals(Tensor rois, int roi_canonical_scale, int roi_canonical_level, int roi_max_level, int roi_min_level, bool legacy_plus_one) -> (Tensor rois_fpn2, Tensor rois_fpn3, Tensor rois_fpn4, Tensor rois_fpn5, Tensor rois_idx_restore_int32)",
-    "inputs": [
-      { "name": "rois", "type": "Tensor" },
-      { "name": "roi_canonical_scale", "type": "int64" },
-      { "name": "roi_canonical_level", "type": "int64" },
-      { "name": "roi_max_level", "type": "int64" },
-      { "name": "roi_min_level", "type": "int64" },
-      { "name": "legacy_plus_one", "type": "boolean" }
-    ],
-    "outputs": [
-      { "name": "rois_fpn2", "type": "Tensor" },
-      { "name": "rois_fpn3", "type": "Tensor" },
-      { "name": "rois_fpn4", "type": "Tensor" },
-      { "name": "rois_fpn5", "type": "Tensor" },
-      { "name": "rois_idx_restore_int32", "type": "Tensor" }
-    ]
+    "name": "_caffe2::DistributeFpnProposals(Tensor rois, int roi_canonical_scale, int roi_canonical_level, int roi_max_level, int roi_min_level, bool legacy_plus_one) -> (Tensor rois_fpn2, Tensor rois_fpn3, Tensor rois_fpn4, Tensor rois_fpn5, Tensor rois_idx_restore_int32)"
   },
   {
-    "name": "_caffe2::GenerateProposals(Tensor scores, Tensor bbox_deltas, Tensor im_info, Tensor anchors, float spatial_scale, int pre_nms_topN, int post_nms_topN, float nms_thresh, float min_size, bool angle_bound_on, int angle_bound_lo, int angle_bound_hi, float clip_angle_thresh, bool legacy_plus_one) -> (Tensor output_0, Tensor output_1)",
-    "inputs": [
-      { "name": "scores", "type": "Tensor" },
-      { "name": "bbox_deltas", "type": "Tensor" },
-      { "name": "im_info", "type": "Tensor" },
-      { "name": "anchors", "type": "Tensor" },
-      { "name": "spatial_scale", "type": "float32" },
-      { "name": "pre_nms_topN", "type": "int64" },
-      { "name": "post_nms_topN", "type": "int64" },
-      { "name": "nms_thresh", "type": "float32" },
-      { "name": "min_size", "type": "float32" },
-      { "name": "angle_bound_on", "type": "boolean" },
-      { "name": "angle_bound_lo", "type": "int64" },
-      { "name": "angle_bound_hi", "type": "int64" },
-      { "name": "clip_angle_thresh", "type": "float32" },
-      { "name": "legacy_plus_one", "type": "boolean" }
-    ],
-    "outputs": [
-      { "name": "output_0", "type": "Tensor" },
-      { "name": "output_1", "type": "Tensor" }
-    ]
+    "name": "_caffe2::GenerateProposals(Tensor scores, Tensor bbox_deltas, Tensor im_info, Tensor anchors, float spatial_scale, int pre_nms_topN, int post_nms_topN, float nms_thresh, float min_size, bool angle_bound_on, int angle_bound_lo, int angle_bound_hi, float clip_angle_thresh, bool legacy_plus_one) -> (Tensor output_0, Tensor output_1)"
   },
   {
-    "name": "_caffe2::RoIAlign(Tensor features, Tensor rois, str order, float spatial_scale, int pooled_h, int pooled_w, int sampling_ratio, bool aligned) -> Tensor",
-    "inputs": [
-      { "name": "features", "type": "Tensor" },
-      { "name": "rois", "type": "Tensor" },
-      { "name": "order", "type": "string" },
-      { "name": "spatial_scale", "type": "float32" },
-      { "name": "pooled_h", "type": "int64" },
-      { "name": "pooled_w", "type": "int64" },
-      { "name": "sampling_ratio", "type": "int64" },
-      { "name": "aligned", "type": "boolean" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "_caffe2::RoIAlign(Tensor features, Tensor rois, str order, float spatial_scale, int pooled_h, int pooled_w, int sampling_ratio, bool aligned) -> Tensor"
   },
   {
-    "name": "aten::Bool.Tensor(Tensor a) -> bool",
-    "inputs": [
-      { "name": "a", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "boolean" }
-    ]
+    "name": "aten::Bool.Tensor(Tensor a) -> bool"
   },
   {
-    "name": "aten::Bool.float(float a) -> bool",
-    "inputs": [
-      { "name": "a", "type": "float32" }
-    ],
-    "outputs": [
-      { "type": "boolean" }
-    ]
+    "name": "aten::Bool.float(float a) -> bool"
   },
   {
-    "name": "aten::Bool.int(int a) -> bool",
-    "inputs": [
-      { "name": "a", "type": "int64" }
-    ],
-    "outputs": [
-      { "type": "boolean" }
-    ]
+    "name": "aten::Bool.int(int a) -> bool"
   },
   {
-    "name": "aten::Complex.Scalar(Scalar a) -> complex",
-    "inputs": [
-      { "name": "a", "type": "Scalar" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.Scalar(Scalar a) -> complex"
   },
   {
-    "name": "aten::Complex.Tensor_Tensor(Tensor a, Tensor b) -> complex",
-    "inputs": [
-      { "name": "a", "type": "Tensor" },
-      { "name": "b", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.Tensor_Tensor(Tensor a, Tensor b) -> complex"
   },
   {
-    "name": "aten::Complex.Tensor_bool(Tensor x, bool y) -> complex",
-    "inputs": [
-      { "name": "x", "type": "Tensor" },
-      { "name": "y", "type": "boolean" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.Tensor_bool(Tensor x, bool y) -> complex"
   },
   {
-    "name": "aten::Complex.Tensor_float(Tensor x, float y) -> complex",
-    "inputs": [
-      { "name": "x", "type": "Tensor" },
-      { "name": "y", "type": "float32" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.Tensor_float(Tensor x, float y) -> complex"
   },
   {
-    "name": "aten::Complex.Tensor_int(Tensor x, int y) -> complex",
-    "inputs": [
-      { "name": "x", "type": "Tensor" },
-      { "name": "y", "type": "int64" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.Tensor_int(Tensor x, int y) -> complex"
   },
   {
-    "name": "aten::Complex.bool_Tensor(bool x, Tensor y) -> complex",
-    "inputs": [
-      { "name": "x", "type": "boolean" },
-      { "name": "y", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.bool_Tensor(bool x, Tensor y) -> complex"
   },
   {
-    "name": "aten::Complex.bool_bool(bool x, bool y) -> complex",
-    "inputs": [
-      { "name": "x", "type": "boolean" },
-      { "name": "y", "type": "boolean" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.bool_bool(bool x, bool y) -> complex"
   },
   {
-    "name": "aten::Complex.bool_float(bool x, float y) -> complex",
-    "inputs": [
-      { "name": "x", "type": "boolean" },
-      { "name": "y", "type": "float32" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.bool_float(bool x, float y) -> complex"
   },
   {
-    "name": "aten::Complex.bool_int(bool x, int y) -> complex",
-    "inputs": [
-      { "name": "x", "type": "boolean" },
-      { "name": "y", "type": "int64" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.bool_int(bool x, int y) -> complex"
   },
   {
-    "name": "aten::Complex.float_Tensor(float x, Tensor y) -> complex",
-    "inputs": [
-      { "name": "x", "type": "float32" },
-      { "name": "y", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.float_Tensor(float x, Tensor y) -> complex"
   },
   {
-    "name": "aten::Complex.float_bool(float x, bool y) -> complex",
-    "inputs": [
-      { "name": "x", "type": "float32" },
-      { "name": "y", "type": "boolean" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.float_bool(float x, bool y) -> complex"
   },
   {
-    "name": "aten::Complex.float_float(float x, float y) -> complex",
-    "inputs": [
-      { "name": "x", "type": "float32" },
-      { "name": "y", "type": "float32" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.float_float(float x, float y) -> complex"
   },
   {
-    "name": "aten::Complex.float_int(float x, int y) -> complex",
-    "inputs": [
-      { "name": "x", "type": "float32" },
-      { "name": "y", "type": "int64" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.float_int(float x, int y) -> complex"
   },
   {
-    "name": "aten::Complex.int_Tensor(int x, Tensor y) -> complex",
-    "inputs": [
-      { "name": "x", "type": "int64" },
-      { "name": "y", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.int_Tensor(int x, Tensor y) -> complex"
   },
   {
-    "name": "aten::Complex.int_bool(int x, bool y) -> complex",
-    "inputs": [
-      { "name": "x", "type": "int64" },
-      { "name": "y", "type": "boolean" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.int_bool(int x, bool y) -> complex"
   },
   {
-    "name": "aten::Complex.int_float(int x, float y) -> complex",
-    "inputs": [
-      { "name": "x", "type": "int64" },
-      { "name": "y", "type": "float32" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.int_float(int x, float y) -> complex"
   },
   {
-    "name": "aten::Complex.int_int(int x, int y) -> complex",
-    "inputs": [
-      { "name": "x", "type": "int64" },
-      { "name": "y", "type": "int64" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::Complex.int_int(int x, int y) -> complex"
   },
   {
-    "name": "aten::ComplexImplicit(Tensor a) -> complex",
-    "inputs": [
-      { "name": "a", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "complex" }
-    ]
+    "name": "aten::ComplexImplicit(Tensor a) -> complex"
   },
   {
-    "name": "aten::Float.Scalar(Scalar a) -> float",
-    "inputs": [
-      { "name": "a", "type": "Scalar" }
-    ],
-    "outputs": [
-      { "type": "float32" }
-    ]
+    "name": "aten::Float.Scalar(Scalar a) -> float"
   },
   {
-    "name": "aten::Float.Tensor(Tensor a) -> float",
-    "inputs": [
-      { "name": "a", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "float32" }
-    ]
+    "name": "aten::Float.Tensor(Tensor a) -> float"
   },
   {
-    "name": "aten::Float.bool(bool a) -> float",
-    "inputs": [
-      { "name": "a", "type": "boolean" }
-    ],
-    "outputs": [
-      { "type": "float32" }
-    ]
+    "name": "aten::Float.bool(bool a) -> float"
   },
   {
-    "name": "aten::Float.int(int a) -> float",
-    "inputs": [
-      { "name": "a", "type": "int64" }
-    ],
-    "outputs": [
-      { "type": "float32" }
-    ]
+    "name": "aten::Float.int(int a) -> float"
   },
   {
-    "name": "aten::Float.str(str a) -> float",
-    "inputs": [
-      { "name": "a", "type": "string" }
-    ],
-    "outputs": [
-      { "type": "float32" }
-    ]
+    "name": "aten::Float.str(str a) -> float"
   },
   {
-    "name": "aten::FloatImplicit(Tensor a) -> float",
-    "inputs": [
-      { "name": "a", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "float32" }
-    ]
+    "name": "aten::FloatImplicit(Tensor a) -> float"
   },
   {
-    "name": "aten::Int.Scalar(Scalar a) -> int",
-    "inputs": [
-      { "name": "a", "type": "Scalar" }
-    ],
-    "outputs": [
-      { "type": "int64" }
-    ]
+    "name": "aten::Int.Scalar(Scalar a) -> int"
   },
   {
-    "name": "aten::Int.Tensor(Tensor a) -> int",
-    "inputs": [
-      { "name": "a", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "int64" }
-    ]
+    "name": "aten::Int.Tensor(Tensor a) -> int"
   },
   {
-    "name": "aten::Int.bool(bool a) -> int",
-    "inputs": [
-      { "name": "a", "type": "boolean" }
-    ],
-    "outputs": [
-      { "type": "int64" }
-    ]
+    "name": "aten::Int.bool(bool a) -> int"
   },
   {
-    "name": "aten::Int.float(float a) -> int",
-    "inputs": [
-      { "name": "a", "type": "float32" }
-    ],
-    "outputs": [
-      { "type": "int64" }
-    ]
+    "name": "aten::Int.float(float a) -> int"
   },
   {
-    "name": "aten::Int.str(str a) -> int",
-    "inputs": [
-      { "name": "a", "type": "string" }
-    ],
-    "outputs": [
-      { "type": "int64" }
-    ]
+    "name": "aten::Int.str(str a) -> int"
   },
   {
-    "name": "aten::IntImplicit(Tensor a) -> int",
-    "inputs": [
-      { "name": "a", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "int64" }
-    ]
+    "name": "aten::IntImplicit(Tensor a) -> int"
   },
   {
-    "name": "aten::ScalarImplicit(Tensor a) -> Scalar",
-    "inputs": [
-      { "name": "a", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "Scalar" }
-    ]
+    "name": "aten::ScalarImplicit(Tensor a) -> Scalar"
   },
   {
-    "name": "aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Scalar" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor"
   },
   {
-    "name": "aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor"
   },
   {
-    "name": "aten::__and__.bool(bool a, bool b) -> bool",
-    "inputs": [
-      { "name": "a", "type": "boolean" },
-      { "name": "b", "type": "boolean" }
-    ],
-    "outputs": [
-      { "type": "boolean" }
-    ]
+    "name": "aten::__and__.bool(bool a, bool b) -> bool"
   },
   {
-    "name": "aten::__and__.int(int a, int b) -> int",
-    "inputs": [
-      { "name": "a", "type": "int64" },
-      { "name": "b", "type": "int64" }
-    ],
-    "outputs": [
-      { "type": "int64" }
-    ]
+    "name": "aten::__and__.int(int a, int b) -> int"
   },
   {
-    "name": "aten::__getitem__.Dict_Tensor(Dict(Tensor, t) self, Tensor key) -> t(*)",
-    "inputs": [
-      { "name": "self", "type": "Dict(Tensor, t)" },
-      { "name": "key", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "t" }
-    ]
+    "name": "aten::__getitem__.Dict_Tensor(Dict(Tensor, t) self, Tensor key) -> t(*)"
   },
   {
-    "name": "aten::__getitem__.Dict_bool(Dict(bool, t) self, bool key) -> t(*)",
-    "inputs": [
-      { "name": "self", "type": "Dict(boolean, t)" },
-      { "name": "key", "type": "boolean" }
-    ],
-    "outputs": [
-      { "type": "t" }
-    ]
+    "name": "aten::__getitem__.Dict_bool(Dict(bool, t) self, bool key) -> t(*)"
   },
   {
-    "name": "aten::__getitem__.Dict_complex(Dict(complex, t) self, complex key) -> t(*)",
-    "inputs": [
-      { "name": "self", "type": "Dict(complex, t)" },
-      { "name": "key", "type": "complex" }
-    ],
-    "outputs": [
-      { "type": "t" }
-    ]
+    "name": "aten::__getitem__.Dict_complex(Dict(complex, t) self, complex key) -> t(*)"
   },
   {
-    "name": "aten::__getitem__.Dict_float(Dict(float, t) self, float key) -> t(*)",
-    "inputs": [
-      { "name": "self", "type": "Dict(float32, t)" },
-      { "name": "key", "type": "float32" }
-    ],
-    "outputs": [
-      { "type": "t" }
-    ]
+    "name": "aten::__getitem__.Dict_float(Dict(float, t) self, float key) -> t(*)"
   },
   {
-    "name": "aten::__getitem__.Dict_int(Dict(int, t) self, int key) -> t(*)",
-    "inputs": [
-      { "name": "self", "type": "Dict(int64, t)" },
-      { "name": "key", "type": "int64" }
-    ],
-    "outputs": [
-      { "type": "t" }
-    ]
+    "name": "aten::__getitem__.Dict_int(Dict(int, t) self, int key) -> t(*)"
   },
   {
-    "name": "aten::__getitem__.Dict_str(Dict(str, t) self, str key) -> t(*)",
-    "inputs": [
-      { "name": "self", "type": "Dict(string, t)" },
-      { "name": "key", "type": "string" }
-    ],
-    "outputs": [
-      { "type": "t" }
-    ]
+    "name": "aten::__getitem__.Dict_str(Dict(str, t) self, str key) -> t(*)"
   },
   {
-    "name": "aten::__getitem__.str(str s, int index) -> str",
-    "inputs": [
-      { "name": "s", "type": "string" },
-      { "name": "index", "type": "int64" }
-    ],
-    "outputs": [
-      { "type": "string" }
-    ]
+    "name": "aten::__getitem__.str(str s, int index) -> str"
   },
   {
-    "name": "aten::__getitem__.t(t[](a) list, int idx) -> t(*)",
-    "inputs": [
-      { "name": "list", "type": "t[]" },
-      { "name": "idx", "type": "int64" }
-    ],
-    "outputs": [
-      { "type": "t" }
-    ]
+    "name": "aten::__getitem__.t(t[](a) list, int idx) -> t(*)"
   },
   {
-    "name": "aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Scalar" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__iand__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
   },
   {
-    "name": "aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__iand__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
   },
   {
-    "name": "aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Scalar" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__ilshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
   },
   {
-    "name": "aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__ilshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
   },
   {
-    "name": "aten::__interpolate(Tensor input, int? size = None, float? scale_factor = None, str mode = 'nearest', bool? align_corners = None, bool? recompute_scale_factor = None, bool antialias = False) -> Tensor",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "size", "type": "int64?", "default": null },
-      { "name": "scale_factor", "type": "float32?", "default": null },
-      { "name": "mode", "type": "string", "default": "nearest" },
-      { "name": "align_corners", "type": "boolean?", "default": null },
-      { "name": "recompute_scale_factor", "type": "boolean?", "default": null },
-      { "name": "antialias", "type": "boolean", "default": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__interpolate(Tensor input, int? size=None, float? scale_factor=None, str mode=\"nearest\", bool? align_corners=None, bool? recompute_scale_factor=None, bool antialias=False) -> Tensor"
   },
   {
-    "name": "aten::__interpolate.scale_list(Tensor input, int? size = None, float[]? scale_factor = None, str mode = 'nearest', bool? align_corners = None, bool? recompute_scale_factor = None, bool antialias = False) -> Tensor",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "size", "type": "int64?", "default": null },
-      { "name": "scale_factor", "type": "float32[]?", "default": null },
-      { "name": "mode", "type": "string", "default": "nearest" },
-      { "name": "align_corners", "type": "boolean?", "default": null },
-      { "name": "recompute_scale_factor", "type": "boolean?", "default": null },
-      { "name": "antialias", "type": "boolean", "default": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__interpolate.scale_list(Tensor input, int? size=None, float[]? scale_factor=None, str mode=\"nearest\", bool? align_corners=None, bool? recompute_scale_factor=None, bool antialias=False) -> Tensor"
   },
   {
-    "name": "aten::__interpolate.size_list(Tensor input, int[]? size = None, float? scale_factor = None, str mode = 'nearest', bool? align_corners = None, bool? recompute_scale_factor = None, bool antialias = False) -> Tensor",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "size", "type": "int64[]?", "default": null },
-      { "name": "scale_factor", "type": "float32?", "default": null },
-      { "name": "mode", "type": "string", "default": "nearest" },
-      { "name": "align_corners", "type": "boolean?", "default": null },
-      { "name": "recompute_scale_factor", "type": "boolean?", "default": null },
-      { "name": "antialias", "type": "boolean", "default": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__interpolate.size_list(Tensor input, int[]? size=None, float? scale_factor=None, str mode=\"nearest\", bool? align_corners=None, bool? recompute_scale_factor=None, bool antialias=False) -> Tensor"
   },
   {
-    "name": "aten::__interpolate.size_list_scale_list(Tensor input, int[]? size = None, float[]? scale_factor = None, str mode = 'nearest', bool? align_corners = None, bool? recompute_scale_factor = None, bool antialias = False) -> Tensor",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "size", "type": "int64[]?", "default": null },
-      { "name": "scale_factor", "type": "float32[]?", "default": null },
-      { "name": "mode", "type": "string", "default": "nearest" },
-      { "name": "align_corners", "type": "boolean?", "default": null },
-      { "name": "recompute_scale_factor", "type": "boolean?", "default": null },
-      { "name": "antialias", "type": "boolean", "default": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__interpolate.size_list_scale_list(Tensor input, int[]? size=None, float[]? scale_factor=None, str mode=\"nearest\", bool? align_corners=None, bool? recompute_scale_factor=None, bool antialias=False) -> Tensor"
   },
   {
-    "name": "aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Scalar" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__irshift__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
   },
   {
-    "name": "aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__irshift__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
   },
   {
-    "name": "aten::__is__(t1 self, t2 obj) -> bool",
-    "inputs": [
-      { "name": "self", "type": "t1" },
-      { "name": "obj", "type": "t2" }
-    ],
-    "outputs": [
-      { "type": "boolean" }
-    ]
+    "name": "aten::__is__(t1 self, t2 obj) -> bool"
   },
   {
-    "name": "aten::__isnot__(t1 self, t2 obj) -> bool",
-    "inputs": [
-      { "name": "self", "type": "t1" },
-      { "name": "obj", "type": "t2" }
-    ],
-    "outputs": [
-      { "type": "boolean" }
-    ]
+    "name": "aten::__isnot__(t1 self, t2 obj) -> bool"
   },
   {
-    "name": "aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Scalar" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__ixor__.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)"
   },
   {
-    "name": "aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__ixor__.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)"
   },
   {
-    "name": "aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Scalar" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__lshift__.Scalar(Tensor self, Scalar other) -> Tensor"
   },
   {
-    "name": "aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__lshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
   },
   {
-    "name": "aten::__not__(bool self) -> bool",
-    "inputs": [
-      { "name": "self", "type": "boolean" }
-    ],
-    "outputs": [
-      { "type": "boolean" }
-    ]
+    "name": "aten::__lshift__.Tensor(Tensor self, Tensor other) -> Tensor"
   },
   {
-    "name": "aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Scalar" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__lshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
   },
   {
-    "name": "aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__lshift__.int(int a, int b) -> int"
   },
   {
-    "name": "aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Scalar" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__not__(bool self) -> bool"
   },
   {
-    "name": "aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__or__.Scalar(Tensor self, Scalar other) -> Tensor"
   },
   {
-    "name": "aten::__upsample(Tensor input, int? size = None, int? scale_factor = None, str mode = 'nearest', bool? align_corners = None) -> Tensor",
-    "category": "Layer",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "size", "type": "int64?", "default": null },
-      { "name": "scale_factor", "type": "int64?", "default": null },
-      { "name": "mode", "type": "string", "default": "nearest" },
-      { "name": "align_corners", "type": "boolean?", "default": null }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__or__.Tensor(Tensor self, Tensor other) -> Tensor"
   },
   {
-    "name": "aten::__upsample.size_list(Tensor input, int[]? size = None, int? scale_factor = None, str mode = 'nearest', bool? align_corners = None) -> Tensor",
-    "category": "Layer",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "size", "type": "int64[]?", "default": null },
-      { "name": "scale_factor", "type": "int64?", "default": null },
-      { "name": "mode", "type": "string", "default": "nearest" },
-      { "name": "align_corners", "type": "boolean?", "default": null }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__or__.bool(bool a, bool b) -> bool"
   },
   {
-    "name": "aten::__upsample_bilinear(Tensor input, int? size = None, int? scale_factor = None) -> Tensor",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "size", "type": "int64?", "default": null },
-      { "name": "scale_factor", "type": "int64?", "default": null }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__or__.int(int a, int b) -> int"
   },
   {
-    "name": "aten::__upsample_bilinear.scale_list(Tensor input, int? size = None, int[]? scale_factor = None) -> Tensor",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "size", "type": "int64?", "default": null },
-      { "name": "scale_factor", "type": "int64[]?", "default": null }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__rshift__.Scalar(Tensor self, Scalar other) -> Tensor"
   },
   {
-    "name": "aten::__upsample_bilinear.size_list(Tensor input, int[]? size = None, int? scale_factor = None) -> Tensor",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "size", "type": "int64[]?", "default": null },
-      { "name": "scale_factor", "type": "int64?", "default": null }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__rshift__.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)"
   },
   {
-    "name": "aten::__upsample_bilinear.size_list_scale_list(Tensor input, int[]? size = None, int[]? scale_factor = None) -> Tensor",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "size", "type": "int64[]?", "default": null },
-      { "name": "scale_factor", "type": "int64[]?", "default": null }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__rshift__.Tensor(Tensor self, Tensor other) -> Tensor"
   },
   {
-    "name": "aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Scalar" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__rshift__.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)"
   },
   {
-    "name": "aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__rshift__.int(int a, int b) -> int"
   },
   {
-    "name": "aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "output_size", "type": "SymInt[2]" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__upsample(Tensor input, int? size=None, int? scale_factor=None, str mode=\"nearest\", bool? align_corners=None) -> Tensor",
+    "category": "Layer"
   },
   {
-    "name": "aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Scalar" },
-      { "name": "alpha", "type": "Scalar", "default": 1 }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__upsample.size_list(Tensor input, int[]? size=None, int? scale_factor=None, str mode=\"nearest\", bool? align_corners=None) -> Tensor",
+    "category": "Layer"
   },
   {
-    "name": "aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Tensor" },
-      { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__upsample_bilinear(Tensor input, int? size=None, int? scale_factor=None) -> Tensor"
  },
   {
-    "name": "aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Tensor" },
-      { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__upsample_bilinear.scale_list(Tensor input, int? size=None, int[]? scale_factor=None) -> Tensor"
   },
   {
-    "name": "aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Scalar" },
-      { "name": "alpha", "type": "Scalar", "default": 1 }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__upsample_bilinear.size_list(Tensor input, int[]? size=None, int? scale_factor=None) -> Tensor"
   },
   {
-    "name": "aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Tensor" },
-      { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__upsample_bilinear.size_list_scale_list(Tensor input, int[]? size=None, int[]? scale_factor=None) -> Tensor"
   },
   {
-    "name": "aten::_aminmax(Tensor self) -> (Tensor, Tensor)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "name": "min", "type": "Tensor" },
-      { "name": "max", "type": "Tensor" }
-    ]
+    "name": "aten::__xor__.Scalar(Tensor self, Scalar other) -> Tensor"
   },
   {
-    "name": "aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "dim", "type": "int64" },
-      { "name": "keepdim", "type": "boolean", "default": false }
-    ],
-    "outputs": [
-      { "name": "min", "type": "Tensor" },
-      { "name": "max", "type": "Tensor" }
-    ]
+    "name": "aten::__xor__.Tensor(Tensor self, Tensor other) -> Tensor"
   },
   {
-    "name": "aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "cuda_enabled", "type": "boolean" },
-      { "name": "cpu_enabled", "type": "boolean" },
-      { "name": "cuda_dtype", "type": "ScalarType" },
-      { "name": "cpu_dtype", "type": "ScalarType" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__xor__.bool(bool a, bool b) -> bool"
   },
   {
-    "name": "aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "non_blocking", "type": "boolean", "default": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::__xor__.int(int a, int b) -> int"
   },
   {
-    "name": "aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "non_blocking", "type": "boolean", "default": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor"
   },
   {
-    "name": "aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "non_blocking", "type": "boolean", "default": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)"
   },
   {
-    "name": "aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "non_blocking", "type": "boolean", "default": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_add_relu.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor"
   },
   {
-    "name": "aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "non_blocking", "type": "boolean", "default": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_add_relu.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)"
   },
   {
-    "name": "aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "non_blocking", "type": "boolean", "default": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_add_relu.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor"
   },
   {
-    "name": "aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "non_blocking", "type": "boolean", "default": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_add_relu.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)"
   },
   {
-    "name": "aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "non_blocking", "type": "boolean", "default": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_add_relu_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)"
   },
   {
-    "name": "aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor",
-    "inputs": [
-      { "name": "x1", "type": "Tensor" },
-      { "name": "x2", "type": "Tensor" },
-      { "name": "p", "type": "float32" },
-      { "name": "compute_mode", "type": "int64?" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_add_relu_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)"
   },
   {
-    "name": "aten::_coalesce(Tensor self) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_aminmax(Tensor self) -> (Tensor, Tensor)"
   },
   {
-    "name": "aten::_conj(Tensor(a) self) -> Tensor(a)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_aminmax.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor, Tensor)"
   },
   {
-    "name": "aten::_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor",
-    "category": "Layer",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "weight", "type": "Tensor" },
-      { "name": "bias", "type": "Tensor?" },
-      { "name": "stride", "type": "SymInt[]", "default": 1 },
-      { "name": "padding", "type": "SymInt[]", "default": 0 },
-      { "name": "dilation", "type": "SymInt[]", "default": 1 },
-      { "name": "transposed", "type": "boolean", "default": false },
-      { "name": "output_padding", "type": "SymInt[]", "default": 0 },
-      { "name": "groups", "type": "SymInt", "default": 1 },
-      { "name": "benchmark", "type": "boolean", "visible": false },
-      { "name": "deterministic", "type": "boolean", "visible": false },
-      { "name": "cudnn_enabled", "type": "boolean", "visible": false },
-      { "name": "allow_tf32", "type": "boolean", "visible": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_aminmax.dim_out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
   },
   {
-    "name": "aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, int[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor",
-    "category": "Layer",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "weight", "type": "Tensor" },
-      { "name": "bias", "type": "Tensor?" },
-      { "name": "stride", "type": "SymInt[]", "default": 1 },
-      { "name": "padding", "type": "SymInt[]", "default": 0 },
-      { "name": "dilation", "type": "SymInt[]", "default": 1 },
-      { "name": "transposed", "type": "boolean", "default": false },
-      { "name": "output_padding", "type": "int64[]", "default": 0 },
-      { "name": "groups", "type": "SymInt", "default": 1 },
-      { "name": "benchmark", "type": "boolean", "visible": false },
-      { "name": "deterministic", "type": "boolean", "visible": false },
-      { "name": "cudnn_enabled", "type": "boolean", "visible": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_aminmax.out(Tensor self, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
   },
   {
-    "name": "aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "weight", "type": "Tensor" },
-      { "name": "bias", "type": "Tensor?" },
-      { "name": "stride", "type": "SymInt[]" },
-      { "name": "padding", "type": "string" },
-      { "name": "dilation", "type": "SymInt[]" },
-      { "name": "groups", "type": "SymInt" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_autocast_to_reduced_precision(Tensor(a) self, bool cuda_enabled, bool cpu_enabled, ScalarType cuda_dtype, ScalarType cpu_dtype) -> Tensor(a)"
   },
   {
-    "name": "aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)",
-    "inputs": [
-      { "name": "log_probs", "type": "Tensor" },
-      { "name": "targets", "type": "Tensor" },
-      { "name": "input_lengths", "type": "int64[]" },
-      { "name": "target_lengths", "type": "int64[]" },
-      { "name": "blank", "type": "int64", "default": 0 },
-      { "name": "zero_infinity", "type": "boolean", "default": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" },
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_cast_Byte(Tensor self, bool non_blocking=False) -> Tensor"
   },
   {
-    "name": "aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)",
-    "inputs": [
-      { "name": "log_probs", "type": "Tensor" },
-      { "name": "targets", "type": "Tensor" },
-      { "name": "input_lengths", "type": "Tensor" },
-      { "name": "target_lengths", "type": "Tensor" },
-      { "name": "blank", "type": "int64", "default": 0 },
-      { "name": "zero_infinity", "type": "boolean", "default": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" },
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_cast_Char(Tensor self, bool non_blocking=False) -> Tensor"
   },
   {
-    "name": "aten::_dim_arange(Tensor like, int dim) -> Tensor",
-    "inputs": [
-      { "name": "like", "type": "Tensor" },
-      { "name": "dim", "type": "int64" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_cast_Double(Tensor self, bool non_blocking=False) -> Tensor"
   },
   {
-    "name": "aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "scale", "type": "Tensor" },
-      { "name": "zero_point", "type": "Tensor" },
-      { "name": "quant_min", "type": "int64" },
-      { "name": "quant_max", "type": "int64" },
-      { "name": "grad_factor", "type": "float32", "default": 1.0 }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_cast_Float(Tensor self, bool non_blocking=False) -> Tensor"
   },
   {
-    "name": "aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.0) -> (Tensor, Tensor, Tensor)",
-    "inputs": [
-      { "name": "grad", "type": "Tensor" },
-      { "name": "self", "type": "Tensor" },
-      { "name": "scale", "type": "Tensor" },
-      { "name": "zero_point", "type": "Tensor" },
-      { "name": "quant_min", "type": "int64" },
-      { "name": "quant_max", "type": "int64" },
-      { "name": "grad_factor", "type": "float32", "default": 1.0 }
-    ],
-    "outputs": [
-      { "type": "Tensor" },
-      { "type": "Tensor" },
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_cast_Half(Tensor self, bool non_blocking=False) -> Tensor"
   },
   {
-    "name": "aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "scale", "type": "float32" },
-      { "name": "zero_point", "type": "int64" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_cast_Int(Tensor self, bool non_blocking=False) -> Tensor"
   },
   {
-    "name": "aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)",
-    "category": "Normalization",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "weight", "type": "Tensor?" },
-      { "name": "bias", "type": "Tensor?" },
-      { "name": "running_mean", "type": "Tensor" },
-      { "name": "running_var", "type": "Tensor" },
-      { "name": "training", "type": "boolean" },
-      { "name": "momentum", "type": "float32" },
-      { "name": "eps", "type": "float32" }
-    ],
-    "outputs": [
-      { "name": "output", "type": "Tensor" },
-      { "name": "save_mean", "type": "Tensor" },
-      { "name": "save_rstd", "type": "Tensor" },
-      { "name": "running_mean_out", "type": "Tensor" },
-      { "name": "running_var_out", "type": "Tensor" }
-    ]
+    "name": "aten::_cast_Long(Tensor self, bool non_blocking=False) -> Tensor"
   },
   {
-    "name": "aten::_native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor)",
-    "category": "Normalization",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "weight", "type": "Tensor?" },
-      { "name": "bias", "type": "Tensor?" },
-      { "name": "running_mean", "type": "Tensor" },
-      { "name": "running_var", "type": "Tensor" },
-      { "name": "momentum", "type": "float32" },
-      { "name": "eps", "type": "float32" }
-    ],
-    "outputs": [
-      { "name": "output", "type": "Tensor" },
-      { "name": "save_mean", "type": "Tensor" },
-      { "name": "save_rstd", "type": "Tensor" }
-    ]
+    "name": "aten::_cast_Short(Tensor self, bool non_blocking=False) -> Tensor"
   },
   {
-    "name": "aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "weight", "type": "Tensor?" },
-      { "name": "bias", "type": "Tensor?" },
-      { "name": "running_mean", "type": "Tensor" },
-      { "name": "running_var", "type": "Tensor" },
-      { "name": "momentum", "type": "float32" },
-      { "name": "eps", "type": "float32" }
-    ],
-    "outputs": [
-      { "type": "Tensor" },
-      { "type": "Tensor" },
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_cdist_forward(Tensor x1, Tensor x2, float p, int? compute_mode) -> Tensor"
   },
   {
-    "name": "aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)",
-    "category": "Attention",
-    "inputs": [
-      { "name": "query", "type": "Tensor" },
-      { "name": "key", "type": "Tensor" },
-      { "name": "value", "type": "Tensor" },
-      { "name": "embed_dim", "type": "int64" },
-      { "name": "num_head", "type": "int64" },
-      { "name": "qkv_weight", "type": "Tensor" },
-      { "name": "qkv_bias", "type": "Tensor" },
-      { "name": "proj_weight", "type": "Tensor" },
-      { "name": "proj_bias", "type": "Tensor" },
-      { "name": "mask", "type": "Tensor?", "default": null },
-      { "name": "need_weights", "type": "boolean", "default": true },
-      { "name": "average_attn_weights", "type": "boolean", "default": true },
-      { "name": "mask_type", "type": "int64?", "default": null }
-    ],
-    "outputs": [
-      { "type": "Tensor" },
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_cdist_forward.out(Tensor x1, Tensor x2, float p, int? compute_mode, *, Tensor(a!) out) -> Tensor(a!)"
   },
   {
-    "name": "aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))",
-    "inputs": [
-      { "name": "query", "type": "Tensor" },
-      { "name": "key", "type": "Tensor" },
-      { "name": "value", "type": "Tensor" },
-      { "name": "embed_dim", "type": "int64" },
-      { "name": "num_head", "type": "int64" },
-      { "name": "qkv_weight", "type": "Tensor" },
-      { "name": "qkv_bias", "type": "Tensor" },
-      { "name": "proj_weight", "type": "Tensor" },
-      { "name": "proj_bias", "type": "Tensor" },
-      { "name": "mask", "type": "Tensor?", "default": null },
-      { "name": "need_weights", "type": "boolean", "default": true },
-      { "name": "average_attn_weights", "type": "boolean", "default": true },
-      { "name": "mask_type", "type": "int64?", "default": null }
-    ],
-    "outputs": [
-      { "type": "Tensor" },
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_coalesce(Tensor self) -> Tensor"
   },
   {
-    "name": "aten::_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor",
-    "inputs": [
-      { "name": "t", "type": "Tensor" },
-      { "name": "mask", "type": "Tensor" },
-      { "name": "mask_check", "type": "boolean", "default": true }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_coalesce.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)"
   },
   {
-    "name": "aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)",
-    "inputs": [
-      { "name": "input", "type": "Tensor" },
-      { "name": "lengths", "type": "Tensor" },
-      { "name": "batch_first", "type": "boolean" }
-    ],
-    "outputs": [
-      { "type": "Tensor" },
-      { "name": "?", "type": "Tensor" }
-    ]
+    "name": "aten::_conj(Tensor(a) self) -> Tensor(a)"
   },
   {
-    "name": "aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)",
-    "inputs": [
-      { "name": "data", "type": "Tensor" },
-      { "name": "batch_sizes", "type": "Tensor" },
-      { "name": "batch_first", "type": "boolean" },
-      { "name": "padding_value", "type": "Scalar" },
-      { "name": "total_length", "type": "int64" }
-    ],
-    "outputs": [
-      { "type": "Tensor" },
-      { "name": "?", "type": "Tensor" }
-    ]
+    "name": "aten::_convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32) -> Tensor",
+    "category": "Layer"
   },
   {
-    "name": "aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0.0, bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset)",
-    "inputs": [
-      { "name": "query", "type": "Tensor" },
-      { "name": "key", "type": "Tensor" },
-      { "name": "value", "type": "Tensor" },
-      { "name": "attn_bias", "type": "Tensor?" },
-      { "name": "compute_log_sumexp", "type": "boolean" },
-      { "name": "dropout_p", "type": "float32", "default": 0.0 },
-      { "name": "is_causal", "type": "boolean", "default": false },
-      { "name": "scale", "type": "float32?", "default": null, "kwarg_only": true }
-    ],
-    "outputs": [
-      { "name": "output", "type": "Tensor" },
-      { "name": "log_sumexp", "type": "Tensor" },
-      { "name": "philox_seed", "type": "Tensor" },
-      { "name": "philox_offset", "type": "Tensor" }
-    ]
+    "name": "aten::_convolution.deprecated(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, int[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled) -> Tensor",
+    "category": "Layer"
   },
   {
-    "name": "aten::_scaled_dot_product_flash_attention_for_cpu(Tensor query, Tensor key, Tensor value, float dropout_p=0.0, bool is_causal=False, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor output, Tensor logsumexp)",
-    "inputs": [
-      { "name": "query", "type": "Tensor" },
-      { "name": "key", "type": "Tensor" },
-      { "name": "value", "type": "Tensor" },
-      { "name": "dropout_p", "type": "float32", "default": 0.0 },
-      { "name": "is_causal", "type": "boolean", "default": false },
-      { "name": "attn_mask", "type": "Tensor?", "default": null, "kwarg_only": true },
-      { "name": "scale", "type": "float32?", "default": null, "kwarg_only": true }
-    ],
-    "outputs": [
-      { "name": "output", "type": "Tensor" },
-      { "name": "logsumexp", "type": "Tensor" }
-    ]
+    "name": "aten::_convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool benchmark, bool deterministic, bool cudnn_enabled, bool allow_tf32, *, Tensor(a!) out) -> Tensor(a!)"
   },
   {
-    "name": "aten::_shape_as_tensor(Tensor self) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_convolution_mode(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, str padding, SymInt[] dilation, SymInt groups) -> Tensor"
   },
   {
-    "name": "aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor",
-    "category": "Activation",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "dim", "type": "int64" },
-      { "name": "half_to_float", "type": "boolean" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_ctc_loss(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)"
   },
   {
-    "name": "aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "dim", "type": "int64" },
-      { "name": "half_to_float", "type": "boolean" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False) -> (Tensor, Tensor)"
   },
   {
-    "name": "aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor",
-    "category": "Tensor",
-    "inputs": [
-      { "name": "indices", "type": "Tensor" },
-      { "name": "values", "type": "Tensor" },
-      { "name": "size", "type": "SymInt[]" },
-      { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true },
-      { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true },
-      { "name": "device", "type": "Device?", "default": null, "kwarg_only": true },
-      { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true },
-      { "name": "is_coalesced", "type": "boolean?", "default": null, "kwarg_only": true }
-    ],
-    "outputs": [
-      { "name": "outputs", "type": "Tensor" }
-    ]
+    "name": "aten::_ctc_loss.Tensor_out(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
   },
   {
-    "name": "aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "other", "type": "Tensor" },
-      { "name": "alpha", "type": "Scalar", "default": 1 }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_ctc_loss.out(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, bool zero_infinity=False, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))"
   },
   {
-    "name": "aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)",
-    "inputs": [
-      { "name": "input_gates", "type": "Tensor" },
-      { "name": "hidden_gates", "type": "Tensor" },
-      { "name": "hx", "type": "Tensor" },
-      { "name": "input_bias", "type": "Tensor?", "default": null },
-      { "name": "hidden_bias", "type": "Tensor?", "default": null }
-    ],
-    "outputs": [
-      { "type": "Tensor" },
-      { "name": "output2", "type": "Tensor" }
-    ]
+    "name": "aten::_dim_arange(Tensor like, int dim) -> Tensor"
   },
   {
-    "name": "aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)",
-    "inputs": [
-      { "name": "input_gates", "type": "Tensor" },
-      { "name": "hidden_gates", "type": "Tensor" },
-      { "name": "cx", "type": "Tensor" },
-      { "name": "input_bias", "type": "Tensor?", "default": null },
-      { "name": "hidden_bias", "type": "Tensor?", "default": null }
-    ],
-    "outputs": [
-      { "type": "Tensor" },
-      { "name": "output2", "type": "Tensor" },
-      { "name": "output3", "type": "Tensor" }
-    ]
+    "name": "aten::_fake_quantize_learnable_per_tensor_affine(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.) -> Tensor"
   },
   {
-    "name": "aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true },
-      { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true },
-      { "name": "device", "type": "Device?", "default": null, "kwarg_only": true },
-      { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true },
-      { "name": "non_blocking", "type": "boolean", "default": false, "kwarg_only": true },
-      { "name": "memory_format", "type": "MemoryFormat?", "default": null, "kwarg_only": true }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_fake_quantize_learnable_per_tensor_affine.out(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1., *, Tensor(a!) out) -> Tensor(a!)"
   },
   {
-    "name": "aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None) -> Tensor",
-    "inputs": [
-      { "name": "src", "type": "Tensor" },
-      { "name": "embed_dim", "type": "int64" },
-      { "name": "num_heads", "type": "int64" },
-      { "name": "qkv_weight", "type": "Tensor" },
-      { "name": "qkv_bias", "type": "Tensor" },
-      { "name": "proj_weight", "type": "Tensor" },
-      { "name": "proj_bias", "type": "Tensor" },
-      { "name": "use_gelu", "type": "boolean" },
-      { "name": "norm_first", "type": "boolean" },
-      { "name": "eps", "type": "float32" },
-      { "name": "norm_weight_1", "type": "Tensor" },
-      { "name": "norm_bias_1", "type": "Tensor" },
-      { "name": "norm_weight_2", "type": "Tensor" },
-      { "name": "norm_bias_2", "type": "Tensor" },
-      { "name": "ffn_weight_1", "type": "Tensor" },
-      { "name": "ffn_bias_1", "type": "Tensor" },
-      { "name": "ffn_weight_2", "type": "Tensor" },
-      { "name": "ffn_bias_2", "type": "Tensor" },
-      { "name": "mask", "type": "Tensor?", "default": null },
-      { "name": "mask_type", "type": "int64?", "default": null }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_fake_quantize_learnable_per_tensor_affine_backward(Tensor grad, Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max, float grad_factor=1.) -> (Tensor, Tensor, Tensor)"
   },
   {
-    "name": "aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "sorted", "type": "boolean", "default": true },
-      { "name": "return_inverse", "type": "boolean", "default": false },
-      { "name": "return_counts", "type": "boolean", "default": false }
-    ],
-    "outputs": [
-      { "type": "Tensor" },
-      { "type": "Tensor" },
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_make_per_tensor_quantized_tensor(Tensor self, float scale, int zero_point) -> Tensor"
   },
   {
-    "name": "aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "size", "type": "SymInt[]" }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_make_per_tensor_quantized_tensor.out(Tensor self, float scale, int zero_point, *, Tensor(a!) out) -> Tensor(a!)"
   },
   {
-    "name": "aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "output_size", "type": "SymInt[2]" },
-      { "name": "align_corners", "type": "boolean" },
-      { "name": "scales_h", "type": "float32?", "default": null },
-      { "name": "scales_w", "type": "float32?", "default": null }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)",
+    "category": "Normalization"
   },
   {
-    "name": "aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)",
-    "inputs": [
-      { "name": "self", "type": "Tensor" },
-      { "name": "output_size", "type": "SymInt[2]" },
-      { "name": "align_corners", "type": "boolean" },
-      { "name": "scales_h", "type": "float32?", "default": null },
-      { "name": "scales_w", "type": "float32?", "default": null }
-    ],
-    "outputs": [
-      { "type": "Tensor" }
-    ]
+    "name": "aten::_native_batch_norm_legit_no_training(Tensor input, Tensor? 
weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor)", + "category": "Normalization" }, { - "name": "aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[]?" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scale_factors", "type": "float32[]?" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))" }, { - "name": "aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor", - "inputs": [ - { "name": "v", "type": "Tensor" }, - { "name": "g", "type": "Tensor" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)", + "category": "Attention" }, { - "name": "aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)", - "inputs": [ - { "name": "grad_w", "type": "Tensor" }, - { "name": "saved_v", "type": "Tensor" }, - { "name": "saved_g", "type": "Tensor" }, - { "name": "saved_norms", "type": "Tensor" }, - { "name": "dim", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))" }, { - "name": "aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)", - "inputs": [ - { "name": "v", "type": "Tensor" }, - { "name": "g", "type": "Tensor" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::_nested_tensor_from_mask(Tensor t, Tensor mask, bool mask_check=True) -> Tensor" }, { - "name": "aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)", - "inputs": [ - { "name": "grad_w", "type": "Tensor" }, - { "name": "saved_v", "type": "Tensor" }, - { "name": "saved_g", "type": "Tensor" }, - { "name": "saved_norms", "type": "Tensor" }, - { "name": "dim", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::_nested_tensor_from_mask.out(Tensor t, Tensor mask, bool mask_check=True, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::abs(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_pack_padded_sequence(Tensor input, Tensor lengths, bool batch_first) -> (Tensor, Tensor)" }, { - "name": "aten::abs.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_pack_padded_sequence.out(Tensor input, Tensor lengths, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))" }, { - "name": "aten::abs_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_pad_packed_sequence(Tensor data, Tensor batch_sizes, bool batch_first, Scalar padding_value, int total_length) -> (Tensor, Tensor)" }, { - "name": "aten::acos(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_scaled_dot_product_efficient_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_bias, bool compute_log_sumexp, float dropout_p=0., bool is_causal=False, *, float? scale=None) -> (Tensor output, Tensor log_sumexp, Tensor philox_seed, Tensor philox_offset)" }, { - "name": "aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_scaled_dot_product_flash_attention_for_cpu(Tensor query, Tensor key, Tensor value, float dropout_p=0., bool is_causal=False, *, Tensor? attn_mask=None, float? scale=None) -> (Tensor output, Tensor logsumexp)" }, { - "name": "aten::acos_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_shape_as_tensor(Tensor self) -> Tensor" }, { - "name": "aten::acosh(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_softmax(Tensor self, int dim, bool half_to_float) -> Tensor", + "category": "Activation" }, { - "name": "aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_softmax.out(Tensor self, int dim, bool half_to_float, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::acosh_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? is_coalesced=None) -> Tensor", + "category": "Tensor" }, { - "name": "aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "int64[1]", "visible": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=1) -> Tensor" }, { - "name": "aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[2]", "visible": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_thnn_fused_gru_cell(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor)" }, { - "name": "aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) 
out) -> Tensor(a!)", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[2]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_thnn_fused_gru_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor hx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))" }, { - "name": "aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[3]", "visible": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)" }, { - "name": "aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[3]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))" }, { - "name": "aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "int64[1]", "visible": false } - ], - "outputs": [ - { "type": "Tensor" }, - { "name": "?", "type": "Tensor" } - ] + "name": "aten::_to_copy(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, MemoryFormat? memory_format=None) -> Tensor" }, { - "name": "aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "int64[2]", "visible": false } - ], - "outputs": [ - { "type": "Tensor" }, - { "name": "?", "type": "Tensor" } - ] + "name": "aten::_to_copy.out(Tensor self, *, bool non_blocking=False, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "int64[2]" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::_transformer_encoder_layer_fwd(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? 
mask_type=None) -> Tensor" }, { - "name": "aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "int64[3]", "visible": false } - ], - "outputs": [ - { "type": "Tensor" }, - { "name": "?", "type": "Tensor" } - ] + "name": "aten::_transformer_encoder_layer_fwd.out(Tensor src, int embed_dim, int num_heads, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, bool use_gelu, bool norm_first, float eps, Tensor norm_weight_1, Tensor norm_bias_1, Tensor norm_weight_2, Tensor norm_bias_2, Tensor ffn_weight_1, Tensor ffn_bias_1, Tensor ffn_weight_2, Tensor ffn_bias_2, Tensor? mask=None, int? mask_type=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "int64[3]" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::_unique2(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)" }, { - "name": "aten::add(Scalar a, Scalar b) -> Scalar", - "inputs": [ - { "name": "a", "type": "Scalar" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "type": "Scalar" } - ] + "name": "aten::_unique2.out(Tensor self, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))" }, { - "name": "aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" }, - { "name": "alpha", "type": "Scalar", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_unsafe_view(Tensor self, SymInt[] size) -> Tensor" }, { - "name": "aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" }, - { "name": "alpha", "type": "Scalar", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_unsafe_view.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::_upsample_bilinear2d_aa(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor" }, { - "name": "aten::add.complex(complex a, complex b) -> complex", - "inputs": [ - { "name": "a", "type": "complex" }, - { "name": "b", "type": "complex" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::_upsample_bilinear2d_aa.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::add.complex_float(complex a, float b) -> complex", - "inputs": [ - { "name": "a", "type": "complex" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::_upsample_bilinear2d_aa.vec(Tensor input, SymInt[]? 
output_size, bool align_corners, float[]? scale_factors) -> Tensor" }, { - "name": "aten::add.complex_int(complex a, int b) -> complex", - "inputs": [ - { "name": "a", "type": "complex" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::_weight_norm(Tensor v, Tensor g, int dim=0) -> Tensor" }, { - "name": "aten::add.float(float a, float b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::_weight_norm_differentiable_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)" }, { - "name": "aten::add.float_complex(float a, complex b) -> complex", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "complex" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::_weight_norm_interface(Tensor v, Tensor g, int dim=0) -> (Tensor, Tensor)" }, { - "name": "aten::add.float_int(float a, int b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::_weight_norm_interface.out(Tensor v, Tensor g, int dim=0, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))" }, { - "name": "aten::add.int(int a, int b) -> int", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::_weight_norm_interface_backward(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim) -> (Tensor, Tensor)" }, { - "name": "aten::add.int_complex(int a, complex b) -> complex", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "complex" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::_weight_norm_interface_backward.out(Tensor grad_w, Tensor saved_v, Tensor saved_g, Tensor saved_norms, int dim, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))" }, { - "name": "aten::add.int_float(int a, float b) -> float", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::abs(Tensor self) -> Tensor" }, { - "name": "aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::abs.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::add.str(str a, str b) -> str", - "inputs": [ - { "name": "a", "type": "string" }, - { "name": "b", "type": "string" } - ], - "outputs": [ - { "type": "string" } - ] + "name": "aten::abs_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::add.t(t[] a, t[] b) -> t[]", - "inputs": [ - { "name": "a", "type": "t[]" }, - { "name": "b", "type": "t[]" } - ], - "outputs": [ - { "type": "t[]" } - ] + "name": "aten::acos(Tensor self) -> Tensor" }, { - "name": "aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" }, - { "name": "alpha", "type": "Scalar", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::acos.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::add_.Tensor(Tensor(a!) 
self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::acos.complex(complex a) -> complex" }, { - "name": "aten::add_.t(t[](a!) self, t[] b) -> t[]", - "inputs": [ - { "name": "self", "type": "t[]" }, - { "name": "b", "type": "t[]" } - ], - "outputs": [ - { "type": "t[]" } - ] + "name": "aten::acos.float(float a) -> float" }, { - "name": "aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "tensor1", "type": "Tensor" }, - { "name": "tensor2", "type": "Tensor" }, - { "name": "value", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::acos.int(int a) -> float" }, { - "name": "aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "tensor1", "type": "Tensor" }, - { "name": "tensor2", "type": "Tensor" }, - { "name": "value", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::acos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "tensor1", "type": "Tensor" }, - { "name": "tensor2", "type": "Tensor" }, - { "name": "value", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::acos_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "tensor1", "type": "Tensor" }, - { "name": "tensor2", "type": "Tensor" }, - { "name": "value", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::acosh(Tensor self) -> Tensor" }, { - "name": "aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "tensor1", "type": "Tensor" }, - { "name": "tensor2", "type": "Tensor" }, - { "name": "value", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::acosh.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mat1", "type": "Tensor" }, - { "name": "mat2", "type": "Tensor" }, - { "name": "beta", "type": "Scalar", "default": 1, "kwarg_only": true }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::acosh.complex(complex a) -> complex" }, { - "name": "aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mat1", "type": "Tensor" }, - { "name": "mat2", "type": "Tensor" }, - { "name": "beta", "type": "Scalar", "default": 1, "kwarg_only": true }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::acosh.float(float a) -> float" }, { - "name": "aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mat1", "type": "Tensor" }, - { "name": "mat2", "type": "Tensor" }, - { "name": "beta", "type": "Scalar", "default": 1, "kwarg_only": true }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::acosh.int(int a) -> float" }, { - "name": "aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mat", "type": "Tensor" }, - { "name": "vec", "type": "Tensor" }, - { "name": "beta", "type": "Scalar", "default": 1, "kwarg_only": true }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::acosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mat", "type": "Tensor" }, - { "name": "vec", "type": "Tensor" }, - { "name": "beta", "type": "Scalar", "default": 1, "kwarg_only": true }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::acosh_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mat", "type": "Tensor" }, - { "name": "vec", "type": "Tensor" }, - { "name": "beta", "type": "Scalar", "default": 1, "kwarg_only": true }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> Tensor", + "category": "Pool" }, { - "name": "aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor", - "inputs": [ - { "name": "theta", "type": "Tensor" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "align_corners", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::adaptive_avg_pool2d(Tensor self, SymInt[2] output_size) -> Tensor", + "category": "Pool" }, { - "name": "aten::alias(Tensor(a) self) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::adaptive_avg_pool2d.out(Tensor self, SymInt[2] output_size, *, Tensor(a!) 
out) -> Tensor(a!)", + "category": "Pool" }, { - "name": "aten::alias_copy(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::adaptive_avg_pool3d(Tensor self, SymInt[3] output_size) -> Tensor", + "category": "Pool" }, { - "name": "aten::all(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::adaptive_avg_pool3d.out(Tensor self, SymInt[3] output_size, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::adaptive_max_pool1d(Tensor self, int[1] output_size) -> (Tensor, Tensor)", + "category": "Pool" }, { - "name": "aten::all.bool(bool[] self) -> bool", - "inputs": [ - { "name": "self", "type": "boolean[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)", + "category": "Pool" }, { - "name": "aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::adaptive_max_pool2d.out(Tensor self, int[2] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))" }, { - "name": "aten::all.dimname(Tensor self, Dimname dim, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::adaptive_max_pool3d(Tensor self, int[3] output_size) -> (Tensor, Tensor)", + "category": "Pool" }, { - "name": "aten::all.dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::adaptive_max_pool3d.out(Tensor self, int[3] output_size, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))" }, { - "name": "aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[]?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::add(Scalar a, Scalar b) -> Scalar" }, { - "name": "aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[]?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor" }, { - "name": "aten::all.float(float[] self) -> bool", - "inputs": [ - { "name": "self", "type": "float32[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::all.int(int[] self) -> bool", - "inputs": [ - { "name": "self", "type": "int64[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor" }, { - "name": "aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::add.complex(complex a, complex b) -> complex" }, { - "name": "aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor", - "category": "Dropout", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "p", "type": "float32" }, - { "name": "train", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::add.complex_float(complex a, float b) -> complex" }, { - "name": "aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", - "category": "Dropout", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "float32" }, - { "name": "train", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::add.complex_int(complex a, int b) -> complex" }, { - "name": "aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]", "default": [] }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::add.float(float a, float b) -> float" }, { - "name": "aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]", "default": [] }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::add.float_complex(float a, complex b) -> complex" }, { - "name": "aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]", "default": [] }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::add.float_int(float a, int b) -> float" }, { - "name": "aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]", "default": [] }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::add.int(int a, int b) -> int" }, { - "name": "aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64?", "default": null, "kwarg_only": true }, - { "name": "keepdim", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "name": "min", "type": "Tensor" }, - { "name": "max", "type": "Tensor" } - ] + "name": "aten::add.int_complex(int a, complex b) -> complex" }, { - "name": "aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) 
max)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64?", "default": null, "kwarg_only": true }, - { "name": "keepdim", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "name": "min", "type": "Tensor" }, - { "name": "max", "type": "Tensor" } - ] + "name": "aten::add.int_float(int a, float b) -> float" }, { - "name": "aten::angle(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::add.str(str a, str b) -> str" }, { - "name": "aten::any(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::add.t(t[] a, t[] b) -> t[]" }, { - "name": "aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)" }, { - "name": "aten::any.bool(bool[] self) -> bool", - "inputs": [ - { "name": "self", "type": "boolean[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)" }, { - "name": "aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::add_.t(t[](a!) self, t[] b) -> t[]" }, { - "name": "aten::any.dimname(Tensor self, str dim, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "string" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor" }, { - "name": "aten::any.dimname_out(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "string" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[]?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::addcmul(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor" }, { - "name": "aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[]?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::addcmul.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::any.float(float[] self) -> bool", - "inputs": [ - { "name": "self", "type": "float32[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::addcmul_(Tensor(a!) self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor(a!)" }, { - "name": "aten::any.int(int[] self) -> bool", - "inputs": [ - { "name": "self", "type": "int64[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::addmm(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", + "category": "Layer" }, { - "name": "aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::addmm.out(Tensor self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::any.str(str[] self) -> bool", - "inputs": [ - { "name": "self", "type": "string[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::addmm_(Tensor(a!) self, Tensor mat1, Tensor mat2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)" }, { - "name": "aten::append.t(t[](a!) self, t(c -> *) el) -> t[](a!)", - "inputs": [ - { "name": "self", "type": "t[]" }, - { "name": "el", "type": "t" } - ], - "outputs": [ - { "type": "t[]" } - ] + "name": "aten::addmv(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor" }, { - "name": "aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "end", "type": "Scalar" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::addmv.out(Tensor self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "end", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::addmv_(Tensor(a!) self, Tensor mat, Tensor vec, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)" }, { - "name": "aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", - "inputs": [ - { "name": "start", "type": "Scalar" }, - { "name": "end", "type": "Scalar" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::affine_grid_generator(Tensor theta, SymInt[] size, bool align_corners) -> Tensor" }, { - "name": "aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "start", "type": "Scalar" }, - { "name": "end", "type": "Scalar" }, - { "name": "step", "type": "Scalar", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::affine_grid_generator.out(Tensor theta, SymInt[] size, bool align_corners, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::arange.start_out_(Scalar start, Scalar end) -> Tensor", - "inputs": [ - { "name": "start", "type": "Scalar" }, - { "name": "end", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::alias(Tensor(a) self) -> Tensor(a)" }, { - "name": "aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "start", "type": "Scalar" }, - { "name": "end", "type": "Scalar" }, - { "name": "step", "type": "Scalar", "default": 1 }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::alias_copy(Tensor self) -> Tensor" }, { - "name": "aten::arctan(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::alias_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::all(Tensor self) -> Tensor" }, { - "name": "aten::arctan_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::all.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::all.bool(bool[] self) -> bool" }, { - "name": "aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::all.dim(Tensor self, int dim, bool keepdim=False) -> Tensor" }, { - "name": "aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::all.dimname(Tensor self, str dim, bool keepdim=False) -> Tensor" }, { - "name": "aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::all.dimname_out(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "descending", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::all.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor" }, { - "name": "aten::argsort.dimname(Tensor self, Dimname dim, bool descending=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "descending", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::all.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "stable", "type": "boolean", "kwarg_only": true }, - { "name": "dim", "type": "int64", "default": -1, "kwarg_only": true }, - { "name": "descending", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::all.float(float[] self) -> bool" }, { - "name": "aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "stable", "type": "boolean", "kwarg_only": true }, - { "name": "dim", "type": "int64", "default": -1, "kwarg_only": true }, - { "name": "descending", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::all.int(int[] self) -> bool" }, { - "name": "aten::argwhere(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::all.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? 
storage_offset=None) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "stride", "type": "SymInt[]" }, - { "name": "storage_offset", "type": "SymInt?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::alpha_dropout(Tensor input, float p, bool train) -> Tensor", + "category": "Dropout" }, { - "name": "aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "stride", "type": "SymInt[]" }, - { "name": "storage_offset", "type": "SymInt?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", + "category": "Dropout" }, { - "name": "aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "stride", "type": "SymInt[]" }, - { "name": "storage_offset", "type": "SymInt?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::amax(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor" }, { - "name": "aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "src", "type": "Tensor" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "stride", "type": "SymInt[]" }, - { "name": "storage_offset", "type": "SymInt?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::amax.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::as_tensor(Tensor(a) data, *, ScalarType? dtype=None, Device? device=None) -> Tensor(b|a)", - "inputs": [ - { "name": "data", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor", "name": "aten" } - ] + "name": "aten::amin(Tensor self, int[1] dim=[], bool keepdim=False) -> Tensor" }, { - "name": "aten::as_tensor.bool(bool t, *, ScalarType? dtype=None, Device? device=None) -> Tensor", - "inputs": [ - { "name": "t", "type": "boolean" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::amin.out(Tensor self, int[1] dim=[], bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::as_tensor.complex(complex t, *, ScalarType? dtype=None, Device? device=None) -> Tensor", - "inputs": [ - { "name": "t", "type": "complex" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::aminmax(Tensor self, *, int? dim=None, bool keepdim=False) -> (Tensor min, Tensor max)" }, { - "name": "aten::as_tensor.float(float t, *, ScalarType? dtype=None, Device? 
device=None) -> Tensor", - "inputs": [ - { "name": "t", "type": "float32" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::aminmax.out(Tensor self, *, int? dim=None, bool keepdim=False, Tensor(a!) min, Tensor(b!) max) -> (Tensor(a!) min, Tensor(b!) max)" }, { - "name": "aten::as_tensor.int(int t, *, ScalarType? dtype=None, Device? device=None) -> Tensor", - "inputs": [ - { "name": "t", "type": "int64" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::angle(Tensor self) -> Tensor" }, { - "name": "aten::as_tensor.list(t[] data, *, ScalarType? dtype=None, Device? device=None) -> Tensor", - "inputs": [ - { "name": "data", "type": "t[]" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::angle.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::asin(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::angle.complex(complex a) -> float" }, { - "name": "aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::angle.float(float a) -> float" }, { - "name": "aten::asinh(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::angle.int(int a) -> float" }, { - "name": "aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::atan(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::any(Tensor self) -> Tensor" }, { - "name": "aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::atan2(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::any.bool(bool[] self) -> bool" }, { - "name": "aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor" }, { - "name": "aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::any.dimname(Tensor self, str dim, bool keepdim=False) -> Tensor" }, { - "name": "aten::atan_(Tensor(a!) 
self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::any.dimname_out(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::atanh(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor" }, { - "name": "aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::atanh_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::any.float(float[] self) -> bool" }, { - "name": "aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, bool ceil_mode=False, bool count_include_pad=True) -> Tensor", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[1]" }, - { "name": "stride", "type": "int64[1]", "default": [] }, - { "name": "padding", "type": "int64[1]", "default": 0 }, - { "name": "ceil_mode", "type": "boolean", "default": false }, - { "name": "count_include_pad", "type": "boolean", "default": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::any.int(int[] self) -> bool" }, { - "name": "aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[2]" }, - { "name": "stride", "type": "int64[2]", "default": [] }, - { "name": "padding", "type": "int64[2]", "default": 0 }, - { "name": "ceil_mode", "type": "boolean", "default": false }, - { "name": "count_include_pad", "type": "boolean", "default": true }, - { "name": "divisor_override", "type": "int64?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[2]" }, - { "name": "stride", "type": "int64[2]", "default": [] }, - { "name": "padding", "type": "int64[2]", "default": 0 }, - { "name": "ceil_mode", "type": "boolean", "default": false }, - { "name": "count_include_pad", "type": "boolean", "default": true }, - { "name": "divisor_override", "type": "int64?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::any.str(str[] self) -> bool" }, { - "name": "aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? 
divisor_override=None) -> Tensor", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[3]" }, - { "name": "stride", "type": "int64[3]", "default": [] }, - { "name": "padding", "type": "int64[3]", "default": 0 }, - { "name": "ceil_mode", "type": "boolean", "default": false }, - { "name": "count_include_pad", "type": "boolean", "default": true }, - { "name": "divisor_override", "type": "int64?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::append.t(t[](a!) self, t(c -> *) el) -> t[](a!)" }, { - "name": "aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[3]" }, - { "name": "stride", "type": "int64[3]", "default": [] }, - { "name": "padding", "type": "int64[3]", "default": 0 }, - { "name": "ceil_mode", "type": "boolean", "default": false }, - { "name": "count_include_pad", "type": "boolean", "default": true }, - { "name": "divisor_override", "type": "int64?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::arange(Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "batch1", "type": "Tensor" }, - { "name": "batch2", "type": "Tensor" }, - { "name": "beta", "type": "Scalar", "default": 1, "kwarg_only": true }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::arange.out(Scalar end, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "batch1", "type": "Tensor" }, - { "name": "batch2", "type": "Tensor" }, - { "name": "beta", "type": "Scalar", "default": 1, "kwarg_only": true }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::arange.start(Scalar start, Scalar end, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "batch1", "type": "Tensor" }, - { "name": "batch2", "type": "Tensor" }, - { "name": "beta", "type": "Scalar", "default": 1, "kwarg_only": true }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::arange.start_out(Scalar start, Scalar end, Scalar step=1, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor", - "category": "Normalization", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?" }, - { "name": "bias", "type": "Tensor?" 
}, - { "name": "running_mean", "type": "Tensor?" }, - { "name": "running_var", "type": "Tensor?" }, - { "name": "training", "type": "boolean", "visible": false }, - { "name": "momentum", "type": "float32", "default": 0.1 }, - { "name": "eps", "type": "float32", "default": 1e-05 }, - { "name": "cudnn_enabled", "type": "boolean", "visible": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::arange.start_out_(Scalar start, Scalar end) -> Tensor" }, { - "name": "aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::arange.start_step(Scalar start, Scalar end, Scalar step=1, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::arctan(Tensor self) -> Tensor" }, { - "name": "aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "float32", "default": 0.5 }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::arctan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "Tensor" }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::arctan_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::bernoulli_.float(Tensor(a!) self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "float32", "default": 0.5 }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::argmax(Tensor self, int? dim=None, bool keepdim=False) -> Tensor" }, { - "name": "aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor", - "inputs": [ - { "name": "input1", "type": "Tensor" }, - { "name": "input2", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::argmax.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::bin(int i) -> str", - "inputs": [ - { "name": "i", "type": "int64" } - ], - "outputs": [ - { "type": "string" } - ] + "name": "aten::argmin(Tensor self, int? dim=None, bool keepdim=False) -> Tensor" }, { - "name": "aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? 
weight=None, int reduction=Mean) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?", "default": null }, - { "name": "reduction", "type": "int64", "default": "Mean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::argmin.out(Tensor self, int? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?", "default": null }, - { "name": "reduction", "type": "int64", "default": "Mean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::argsort(Tensor self, int dim=-1, bool descending=False) -> Tensor" }, { - "name": "aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean) -> Tensor", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?", "default": null }, - { "name": "reduction", "type": "int64", "default": "Mean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::argsort.dimname(Tensor self, str dim, bool descending=False) -> Tensor" }, { - "name": "aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, *, Tensor(a!) grad_input) -> Tensor(a!)", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?", "default": null }, - { "name": "reduction", "type": "int64", "default": "Mean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::argsort.stable(Tensor self, *, bool stable, int dim=-1, bool descending=False) -> Tensor" }, { - "name": "aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=Mean) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?", "default": null }, - { "name": "pos_weight", "type": "Tensor?", "default": null }, - { "name": "reduction", "type": "int64", "default": "Mean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::argsort.stable_out(Tensor self, *, bool stable, int dim=-1, bool descending=False, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "weights", "type": "Tensor?", "default": null }, - { "name": "minlength", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::argwhere(Tensor self) -> Tensor" }, { - "name": "aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor", - "inputs": [ - { "name": "count", "type": "Tensor" }, - { "name": "prob", "type": "Tensor" }, - { "name": "generator", "type": "Generator?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::as_strided(Tensor(a) self, SymInt[] size, SymInt[] stride, SymInt? 
storage_offset=None) -> Tensor(a)" }, { - "name": "aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::as_strided_(Tensor(a!) self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor(a!)" }, { - "name": "aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Scalar" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::as_strided_copy(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor" }, { - "name": "aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Scalar" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::as_strided_copy.out(Tensor self, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::as_strided_scatter(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None) -> Tensor" }, { - "name": "aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::as_strided_scatter.out(Tensor self, Tensor src, SymInt[] size, SymInt[] stride, SymInt? storage_offset=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::as_tensor(Tensor(a) data, *, ScalarType? dtype=None, Device? device=None) -> Tensor(a|b)" }, { - "name": "aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::as_tensor.bool(bool t, *, ScalarType? dtype=None, Device? device=None) -> Tensor" }, { - "name": "aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::as_tensor.complex(complex t, *, ScalarType? dtype=None, Device? device=None) -> Tensor" }, { - "name": "aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Scalar" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::as_tensor.float(float t, *, ScalarType? dtype=None, Device? device=None) -> Tensor" }, { - "name": "aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Scalar" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::as_tensor.int(int t, *, ScalarType? dtype=None, Device? device=None) -> Tensor" }, { - "name": "aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::as_tensor.list(t[] data, *, ScalarType? dtype=None, Device? device=None) -> Tensor" }, { - "name": "aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::asin(Tensor self) -> Tensor" }, { - "name": "aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::asin.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::asin.complex(complex a) -> complex" }, { - "name": "aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::asin.float(float a) -> float" }, { - "name": "aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::asin.int(int a) -> float" }, { - "name": "aten::bitwise_not(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::asin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::asinh(Tensor self) -> Tensor" }, { - "name": "aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::asinh.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::asinh.complex(complex a) -> complex" }, { - "name": "aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Scalar" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::asinh.float(float a) -> float" }, { - "name": "aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Scalar" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::asinh.int(int a) -> float" }, { - "name": "aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::asinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atan(Tensor self) -> Tensor" }, { - "name": "aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atan.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atan.complex(complex a) -> complex" }, { - "name": "aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atan.float(float a) -> float" }, { - "name": "aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Scalar" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atan.int(int a) -> float" }, { - "name": "aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Scalar" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atan2(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atan2.Scalar_Scalar(Scalar a, Scalar b) -> float" }, { - "name": "aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atan2.float(float a, float b) -> float" }, { - "name": "aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atan2.float_int(float a, int b) -> float" }, { - "name": "aten::bitwise_right_shift_.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atan2.int(int a, int b) -> float" }, { - "name": "aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atan2.int_float(int a, float b) -> float" }, { - "name": "aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Scalar" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atan2_(Tensor(a!) self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Scalar" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atan_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atanh(Tensor self) -> Tensor" }, { - "name": "aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atanh.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atanh.complex(complex a) -> complex" }, { - "name": "aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atanh.float(float a) -> float" }, { - "name": "aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atanh.int(int a) -> float" }, { - "name": "aten::block_diag(Tensor[] tensors) -> Tensor", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::bmm(Tensor self, Tensor mat2) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mat2", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::atanh_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mat2", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::avg_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], bool ceil_mode=False, bool count_include_pad=True) -> Tensor", + "category": "Pool" }, { - "name": "aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::avg_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor", + "category": "Pool" }, { - "name": "aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "size", "type": "SymInt[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::avg_pool2d.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Scalar" }, - { "name": "boundaries", "type": "Tensor" }, - { "name": "out_int32", "type": "boolean", "default": false, "kwarg_only": true }, - { "name": "right", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::avg_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None) -> Tensor", + "category": "Pool" }, { - "name": "aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "boundaries", "type": "Tensor" }, - { "name": "out_int32", "type": "boolean", "default": false, "kwarg_only": true }, - { "name": "right", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::avg_pool3d.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, bool ceil_mode=False, bool count_include_pad=True, int? divisor_override=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "boundaries", "type": "Tensor" }, - { "name": "out_int32", "type": "boolean", "default": false, "kwarg_only": true }, - { "name": "right", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor" }, { - "name": "aten::cartesian_prod(Tensor[] tensors) -> Tensor", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::cat(Tensor[] tensors, int dim=0) -> Tensor", - "category": "Tensor", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::baddbmm_(Tensor(a!) self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor(a!)" }, { - "name": "aten::cat.names(Tensor[] tensors, Dimname dim) -> Tensor", - "category": "Tensor", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" }, - { "name": "dim", "type": "Dimname" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::batch_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool training, float momentum, float eps, bool cudnn_enabled) -> Tensor", + "category": "Normalization" }, { - "name": "aten::cat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", - "category": "Tensor", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" }, - { "name": "dim", "type": "Dimname" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bernoulli(Tensor self, *, Generator? generator=None) -> Tensor" }, { - "name": "aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", - "category": "Tensor", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bernoulli.Tensor(Tensor self, Tensor p, *, Generator? generator=None) -> Tensor" }, { - "name": "aten::cauchy_(Tensor(a!) self, float median=0, float sigma=1, *, Generator? generator=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "median", "type": "float32", "default": 0 }, - { "name": "sigma", "type": "float32", "default": 1 }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bernoulli.Tensor_out(Tensor self, Tensor p, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::cdist(Tensor x1, Tensor x2, float p=2, int? compute_mode=None) -> Tensor", - "inputs": [ - { "name": "x1", "type": "Tensor" }, - { "name": "x2", "type": "Tensor" }, - { "name": "p", "type": "float32", "default": 2 }, - { "name": "compute_mode", "type": "int64?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bernoulli.float_out(Tensor self, float p=0.5, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::ceil(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bernoulli.out(Tensor self, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::ceil.Scalar(Scalar a) -> Scalar", - "inputs": [ - { "name": "a", "type": "Scalar" } - ], - "outputs": [ - { "type": "Scalar" } - ] + "name": "aten::bernoulli.p(Tensor self, float p, *, Generator? generator=None) -> Tensor" }, { - "name": "aten::ceil.float(float a) -> int", - "inputs": [ - { "name": "a", "type": "float32" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::bernoulli_.Tensor(Tensor(a!) self, Tensor p, *, Generator? generator=None) -> Tensor(a!)" }, { - "name": "aten::ceil.int(int a) -> int", - "inputs": [ - { "name": "a", "type": "int64" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::bernoulli_.float(Tensor(a!) 
self, float p=0.5, *, Generator? generator=None) -> Tensor(a!)" }, { - "name": "aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bilinear(Tensor input1, Tensor input2, Tensor weight, Tensor? bias=None) -> Tensor" }, { - "name": "aten::ceil_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bin(int i) -> str" }, { - "name": "aten::celu(Tensor self, Scalar alpha=1.0) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar", "default": 1.0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::binary_cross_entropy(Tensor self, Tensor target, Tensor? weight=None, int reduction=1) -> Tensor" }, { - "name": "aten::celu_(Tensor(a!) self, Scalar alpha=1.0) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar", "default": 1.0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::binary_cross_entropy.out(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::channel_shuffle(Tensor self, SymInt groups) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "groups", "type": "SymInt" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::binary_cross_entropy_backward(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=1) -> Tensor" }, { - "name": "aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "chunks", "type": "int64" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::binary_cross_entropy_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, Tensor? weight=None, int reduction=1, *, Tensor(a!) grad_input) -> Tensor(a!)" }, { - "name": "aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Scalar?", "default": null }, - { "name": "max", "type": "Scalar?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::binary_cross_entropy_with_logits(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=1) -> Tensor" }, { - "name": "aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Tensor?", "default": null }, - { "name": "max", "type": "Tensor?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::binary_cross_entropy_with_logits.out(Tensor self, Tensor target, Tensor? weight=None, Tensor? pos_weight=None, int reduction=1, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Tensor?", "default": null }, - { "name": "max", "type": "Tensor?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bincount(Tensor self, Tensor? weights=None, int minlength=0) -> Tensor" }, { - "name": "aten::clamp.out(Tensor self, Scalar? 
min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Scalar?", "default": null }, - { "name": "max", "type": "Scalar?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bincount.out(Tensor self, Tensor? weights=None, int minlength=0, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Scalar?", "default": null }, - { "name": "max", "type": "Scalar?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::binomial(Tensor count, Tensor prob, Generator? generator=None) -> Tensor" }, { - "name": "aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Tensor?", "default": null }, - { "name": "max", "type": "Tensor?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::binomial.out(Tensor count, Tensor prob, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::clamp_max(Tensor self, Scalar max) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "max", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_and.Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "max", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor" }, { - "name": "aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "max", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "max", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "max", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "max", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::clamp_min(Tensor self, Scalar min) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_and_.Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)" }, { - "name": "aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor" }, { - "name": "aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::classes._nnapi.Compilation", - "inputs": [ - { "name": "serialized_model", "type": "Tensor" }, - { "name": "inputs", "type": "Tensor[]" }, - { "name": "parameter_buffers", "type": "Tensor[]" } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Scalar?", "default": null }, - { "name": "max", "type": "Scalar?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Tensor?", "default": null }, - { "name": "max", "type": "Tensor?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Tensor?", "default": null }, - { "name": "max", "type": "Tensor?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)" }, { - "name": "aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Scalar?", "default": null }, - { "name": "max", "type": "Scalar?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_not(Tensor self) -> Tensor" }, { - "name": "aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Scalar?", "default": null }, - { "name": "max", "type": "Scalar?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min", "type": "Tensor?", "default": null }, - { "name": "max", "type": "Tensor?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::coalesce(Tensor(a) self) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor" }, { - "name": "aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[2]" }, - { "name": "kernel_size", "type": "int64[2]" }, - { "name": "dilation", "type": "int64[2]" }, - { "name": "padding", "type": "int64[2]" }, - { "name": "stride", "type": "int64[2]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[2]" }, - { "name": "kernel_size", "type": "int64[2]" }, - { "name": "dilation", "type": "int64[2]" }, - { "name": "padding", "type": "int64[2]" }, - { "name": "stride", "type": "int64[2]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::column_stack(Tensor[] tensors) -> Tensor", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::complex(Tensor real, Tensor imag) -> Tensor", - "inputs": [ - { "name": "real", "type": "Tensor" }, - { "name": "imag", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)" }, { - "name": "aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "real", "type": "Tensor" }, - { "name": "imag", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::concat(Tensor[] tensors, int dim=0) -> Tensor", - "category": "Tensor", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor" }, { - "name": "aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor", - "category": "Tensor", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" }, - { "name": "dim", "type": "Dimname" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" }, - { "name": "dim", "type": "Dimname" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::concatenate.names(Tensor[] tensors, Dimname dim) -> Tensor", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" }, - { "name": "dim", "type": "Dimname" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::concatenate.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" }, - { "name": "dim", "type": "Dimname" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)" }, { - "name": "aten::conj(Tensor(a) self) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor", - "category": "Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "pad", "type": "SymInt[]" }, - { "name": "value", "type": "Scalar", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor" }, { - "name": "aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=contiguous_format) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "memory_format", "type": "MemoryFormat", "default": "contiguous_format", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] dilation=1, SymInt groups=1) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?", "default": null }, - { "name": "stride", "type": "SymInt[1]", "default": 1 }, - { "name": "padding", "type": "SymInt[1]", "default": 0 }, - { "name": "dilation", "type": "SymInt[1]", "default": 1 }, - { "name": "groups", "type": "SymInt", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, str padding=\"valid\", SymInt[1] dilation=1, SymInt groups=1) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?", "default": null }, - { "name": "stride", "type": "SymInt[1]", "default": 1 }, - { "name": "padding", "type": "string", "default": "valid" }, - { "name": "dilation", "type": "SymInt[1]", "default": 1 }, - { "name": "groups", "type": "SymInt", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] dilation=1, SymInt groups=1) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?", "default": null }, - { "name": "stride", "type": "SymInt[2]", "default": 1 }, - { "name": "padding", "type": "SymInt[2]", "default": 0 }, - { "name": "dilation", "type": "SymInt[2]", "default": 1 }, - { "name": "groups", "type": "SymInt", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::conv2d.padding(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[2] stride=1, str padding=\"valid\", SymInt[2] dilation=1, SymInt groups=1) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?", "default": null }, - { "name": "stride", "type": "SymInt[2]", "default": 1 }, - { "name": "padding", "type": "string", "default": "valid" }, - { "name": "dilation", "type": "SymInt[2]", "default": 1 }, - { "name": "groups", "type": "SymInt", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)" }, { - "name": "aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] dilation=1, SymInt groups=1) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?", "default": null }, - { "name": "stride", "type": "SymInt[3]", "default": 1 }, - { "name": "padding", "type": "SymInt[3]", "default": 0 }, - { "name": "dilation", "type": "SymInt[3]", "default": 1 }, - { "name": "groups", "type": "SymInt", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, str padding=\"valid\", SymInt[3] dilation=1, SymInt groups=1) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?", "default": null }, - { "name": "stride", "type": "SymInt[3]", "default": 1 }, - { "name": "padding", "type": "string", "default": "valid" }, - { "name": "dilation", "type": "SymInt[3]", "default": 1 }, - { "name": "groups", "type": "SymInt", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::block_diag(Tensor[] tensors) -> Tensor" }, { - "name": "aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=1, SymInt[1] padding=0, SymInt[1] output_padding=0, SymInt groups=1, SymInt[1] dilation=1) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?", "default": null }, - { "name": "stride", "type": "SymInt[1]", "default": 1 }, - { "name": "padding", "type": "SymInt[1]", "default": 0 }, - { "name": "output_padding", "type": "SymInt[1]", "default": 0 }, - { "name": "groups", "type": "SymInt", "default": 1 }, - { "name": "dilation", "type": "SymInt[1]", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::block_diag.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? 
bias=None, SymInt[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, SymInt groups=1, SymInt[2] dilation=1) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?", "default": null }, - { "name": "stride", "type": "SymInt[2]", "default": 1 }, - { "name": "padding", "type": "SymInt[2]", "default": 0 }, - { "name": "output_padding", "type": "SymInt[2]", "default": 0 }, - { "name": "groups", "type": "SymInt", "default": 1 }, - { "name": "dilation", "type": "SymInt[2]", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bmm(Tensor self, Tensor mat2) -> Tensor" }, { - "name": "aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=1, SymInt[3] padding=0, SymInt[3] output_padding=0, SymInt groups=1, SymInt[3] dilation=1) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?", "default": null }, - { "name": "stride", "type": "SymInt[3]", "default": 1 }, - { "name": "padding", "type": "SymInt[3]", "default": 0 }, - { "name": "output_padding", "type": "SymInt[3]", "default": 0 }, - { "name": "groups", "type": "SymInt", "default": 1 }, - { "name": "dilation", "type": "SymInt[3]", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bmm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "stride", "type": "SymInt[]" }, - { "name": "padding", "type": "SymInt[]" }, - { "name": "dilation", "type": "SymInt[]" }, - { "name": "transposed", "type": "boolean" }, - { "name": "output_padding", "type": "SymInt[]" }, - { "name": "groups", "type": "SymInt" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::broadcast_tensors(Tensor[] tensors) -> Tensor[]" }, { - "name": "aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "bias_sizes", "type": "SymInt[]?" 
}, - { "name": "stride", "type": "SymInt[]" }, - { "name": "padding", "type": "SymInt[]" }, - { "name": "dilation", "type": "SymInt[]" }, - { "name": "transposed", "type": "boolean" }, - { "name": "output_padding", "type": "SymInt[]" }, - { "name": "groups", "type": "SymInt" }, - { "name": "output_mask", "type": "boolean[3]" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::broadcast_to(Tensor(a) self, SymInt[] size) -> Tensor(a)" }, { - "name": "aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "stride", "type": "SymInt[]" }, - { "name": "padding", "type": "SymInt[]" }, - { "name": "dilation", "type": "SymInt[]" }, - { "name": "transposed", "type": "boolean" }, - { "name": "output_padding", "type": "SymInt[]" }, - { "name": "groups", "type": "SymInt" }, - { "name": "output_mask", "type": "boolean[3]" } - ], - "outputs": [ - { "name": "grad_input", "type": "Tensor" }, - { "name": "grad_weight", "type": "Tensor" }, - { "name": "grad_bias", "type": "Tensor" } - ] + "name": "aten::bucketize.Scalar(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor" }, { - "name": "aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "stride", "type": "SymInt[]" }, - { "name": "padding", "type": "SymInt[]" }, - { "name": "dilation", "type": "SymInt[]" }, - { "name": "transposed", "type": "boolean" }, - { "name": "output_padding", "type": "SymInt[]" }, - { "name": "groups", "type": "SymInt" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bucketize.Scalar_out(Scalar self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "src", "type": "Tensor" }, - { "name": "non_blocking", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bucketize.Tensor(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False) -> Tensor" }, { - "name": "aten::cos(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::bucketize.Tensor_out(Tensor self, Tensor boundaries, *, bool out_int32=False, bool right=False, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::cos.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cartesian_prod(Tensor[] tensors) -> Tensor" }, { - "name": "aten::cosh(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cat(Tensor[] tensors, int dim=0) -> Tensor", + "category": "Tensor" }, { - "name": "aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cat.names(Tensor[] tensors, str dim) -> Tensor", + "category": "Tensor" }, { - "name": "aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor", - "inputs": [ - { "name": "x1", "type": "Tensor" }, - { "name": "x2", "type": "Tensor" }, - { "name": "dim", "type": "int64", "default": 1 }, - { "name": "eps", "type": "float32", "default": 1e-08 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cat.names_out(Tensor[] tensors, str dim, *, Tensor(a!) out) -> Tensor(a!)", + "category": "Tensor" }, { - "name": "aten::count_nonzero(Tensor self, int? dim=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", + "category": "Tensor" }, { - "name": "aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cauchy_(Tensor(a!) self, float median=0., float sigma=1., *, Generator? generator=None) -> Tensor(a!)" }, { - "name": "aten::cpu(Tensor(a) self) -> Tensor(a|b)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cdist(Tensor x1, Tensor x2, float p=2., int? compute_mode=None) -> Tensor" }, { - "name": "aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "dim", "type": "int64?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::ceil(Tensor self) -> Tensor" }, { - "name": "aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "dim", "type": "int64?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::ceil.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? 
weight=None, int reduction=Mean, SymInt ignore_index=-100, float label_smoothing=0.0) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?", "default": null }, - { "name": "reduction", "type": "int64", "default": "Mean" }, - { "name": "ignore_index", "type": "SymInt", "default": -100 }, - { "name": "label_smoothing", "type": "float32", "default": 0.0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::ceil.float(float a) -> int" }, { - "name": "aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor", - "inputs": [ - { "name": "log_probs", "type": "Tensor" }, - { "name": "targets", "type": "Tensor" }, - { "name": "input_lengths", "type": "int64[]" }, - { "name": "target_lengths", "type": "int64[]" }, - { "name": "blank", "type": "int64", "default": 0 }, - { "name": "reduction", "type": "int64", "default": "Mean" }, - { "name": "zero_infinity", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::ceil.int(int a) -> int" }, { - "name": "aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=Mean, bool zero_infinity=False) -> Tensor", - "inputs": [ - { "name": "log_probs", "type": "Tensor" }, - { "name": "targets", "type": "Tensor" }, - { "name": "input_lengths", "type": "Tensor" }, - { "name": "target_lengths", "type": "Tensor" }, - { "name": "blank", "type": "int64", "default": 0 }, - { "name": "reduction", "type": "int64", "default": "Mean" }, - { "name": "zero_infinity", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "z", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar?" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "stride", "type": "SymInt[]" }, - { "name": "padding", "type": "SymInt[]" }, - { "name": "dilation", "type": "SymInt[]" }, - { "name": "groups", "type": "SymInt" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::ceil_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "stride", "type": "SymInt[]" }, - { "name": "padding", "type": "SymInt[]" }, - { "name": "dilation", "type": "SymInt[]" }, - { "name": "groups", "type": "SymInt" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::celu(Tensor self, Scalar alpha=1.) 
-> Tensor", + "category": "Activation" }, { - "name": "aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::celu.out(Tensor self, Scalar alpha=1., *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::cummax.dimname(Tensor self, Dimname dim) -> (Tensor values, Tensor indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::celu_(Tensor(a!) self, Scalar alpha=1.) -> Tensor(a!)" }, { - "name": "aten::cummax.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::channel_shuffle(Tensor self, SymInt groups) -> Tensor" }, { - "name": "aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::channel_shuffle.out(Tensor self, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor", - "inputs": [ - { "name": "grad", "type": "Tensor" }, - { "name": "input", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" }, - { "name": "dim", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::chunk(Tensor(a -> *) self, int chunks, int dim=0) -> Tensor(a)[]" }, { - "name": "aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clamp(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor" }, { - "name": "aten::cumsum.dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clamp.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor" }, { - "name": "aten::cumsum.dimname_out(Tensor self, Dimname dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clamp.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clamp.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clamp_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)" }, { - "name": "aten::cumsum_.dimname(Tensor(a!) self, Dimname dim, *, ScalarType? dtype=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clamp_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)" }, { - "name": "aten::dequantize.any(Any tensors) -> Any", - "category": "Quantization", - "inputs": [ - { "name": "tensors", "type": "Any" } - ], - "outputs": [ - { "type": "Any" } - ] + "name": "aten::clamp_max(Tensor self, Scalar max) -> Tensor" }, { - "name": "aten::dequantize.list(Tensor[] qtensors) -> Tensor[]", - "category": "Quantization", - "inputs": [ - { "name": "qtensors", "type": "Tensor[]" } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::clamp_max.Tensor(Tensor self, Tensor max) -> Tensor" }, { - "name": "aten::dequantize.self(Tensor self) -> Tensor", - "category": "Quantization", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clamp_max.Tensor_out(Tensor self, Tensor max, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::dequantize.tensor(Tensor qtensor) -> Tensor", - "category": "Quantization", - "inputs": [ - { "name": "qtensor", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clamp_max.out(Tensor self, Scalar max, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::dequantize.tensors(Tensor[] tensors) -> Tensor[]", - "category": "Quantization", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::clamp_max_(Tensor(a!) self, Scalar max) -> Tensor(a!)" }, { - "name": "aten::detach(Tensor(a) self) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clamp_max_.Tensor(Tensor(a!) self, Tensor max) -> Tensor(a!)" }, { - "name": "aten::detach_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clamp_min(Tensor self, Scalar min) -> Tensor" }, { - "name": "aten::detach_copy(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clamp_min.Tensor(Tensor self, Tensor min) -> Tensor" }, { - "name": "aten::device(str a) -> Device", - "inputs": [ - { "name": "a", "type": "string" } - ], - "outputs": [ - { "type": "Device" } - ] + "name": "aten::clamp_min.Tensor_out(Tensor self, Tensor min, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::device.with_index(str type, int index) -> Device", - "inputs": [ - { "name": "type", "type": "string" }, - { "name": "index", "type": "int64" } - ], - "outputs": [ - { "type": "Device" } - ] + "name": "aten::clamp_min.out(Tensor self, Scalar min, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::diag(Tensor self, int diagonal=0) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "diagonal", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clamp_min_(Tensor(a!) self, Scalar min) -> Tensor(a!)" }, { - "name": "aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "diagonal", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clamp_min_.Tensor(Tensor(a!) self, Tensor min) -> Tensor(a!)" }, { - "name": "aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor", + "name": "aten::classes._nnapi.Compilation", "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "offset", "type": "int64", "default": 0 }, - { "name": "dim1", "type": "int64", "default": -2 }, - { "name": "dim2", "type": "int64", "default": -1 } + { "name": "serialized_model", "type": "Tensor" }, + { "name": "inputs", "type": "Tensor[]" }, + { "name": "parameter_buffers", "type": "Tensor[]" } ], "outputs": [ - { "type": "Tensor" } + { "type": "Tensor[]" } ] }, { - "name": "aten::diagflat(Tensor self, int offset=0) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "offset", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clip(Tensor self, Scalar? min=None, Scalar? max=None) -> Tensor" }, { - "name": "aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "offset", "type": "int64", "default": 0 }, - { "name": "dim1", "type": "int64", "default": 0 }, - { "name": "dim2", "type": "int64", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clip.Tensor(Tensor self, Tensor? min=None, Tensor? max=None) -> Tensor" }, { - "name": "aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "outdim", "type": "Dimname", "kwarg_only": true }, - { "name": "dim1", "type": "Dimname", "kwarg_only": true }, - { "name": "dim2", "type": "Dimname", "kwarg_only": true }, - { "name": "offset", "type": "int64", "default": 0, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clip.Tensor_out(Tensor self, Tensor? min=None, Tensor? max=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "input_sizes", "type": "SymInt[]" }, - { "name": "offset", "type": "int64" }, - { "name": "dim1", "type": "int64" }, - { "name": "dim2", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clip.out(Tensor self, Scalar? min=None, Scalar? max=None, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "offset", "type": "int64", "default": 0 }, - { "name": "dim1", "type": "int64", "default": 0 }, - { "name": "dim2", "type": "int64", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clip_(Tensor(a!) self, Scalar? min=None, Scalar? max=None) -> Tensor(a!)" }, { - "name": "aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "src", "type": "Tensor" }, - { "name": "offset", "type": "int64", "default": 0 }, - { "name": "dim1", "type": "int64", "default": 0 }, - { "name": "dim2", "type": "int64", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::clip_.Tensor(Tensor(a!) self, Tensor? min=None, Tensor? max=None) -> Tensor(a!)" }, { - "name": "aten::dict() -> Dict(str, Tensor)", - "inputs": [], - "outputs": [ - { "type": "Dict(string, Tensor)" } - ] + "name": "aten::clone(Tensor self, *, MemoryFormat? memory_format=None) -> Tensor" }, { - "name": "aten::dict.Dict_Tensor(Dict(Tensor, t)(a) self) -> Dict(Tensor, t)", - "inputs": [ - { "name": "self", "type": "Dict(Tensor, t)" } - ], - "outputs": [ - { "type": "Dict(Tensor, t)" } - ] + "name": "aten::clone.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::dict.Dict_bool(Dict(bool, t)(a) self) -> Dict(bool, t)", - "inputs": [ - { "name": "self", "type": "Dict(boolean, t)" } - ], - "outputs": [ - { "type": "Dict(boolean, t)" } - ] + "name": "aten::coalesce(Tensor(a) self) -> Tensor(a)" }, { - "name": "aten::dict.Dict_complex(Dict(complex, t)(a) self) -> Dict(complex, t)", - "inputs": [ - { "name": "self", "type": "Dict(complex, t)" } - ], - "outputs": [ - { "type": "Dict(complex, t)" } - ] + "name": "aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor" }, { - "name": "aten::dict.Dict_float(Dict(float, t)(a) self) -> Dict(float, t)", - "inputs": [ - { "name": "self", "type": "Dict(float32, t)" } - ], - "outputs": [ - { "type": "Dict(float32, t)" } - ] + "name": "aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::dict.Dict_int(Dict(int, t)(a) self) -> Dict(int, t)", - "inputs": [ - { "name": "self", "type": "Dict(int64, t)" } - ], - "outputs": [ - { "type": "Dict(int64, t)" } - ] + "name": "aten::column_stack(Tensor[] tensors) -> Tensor" }, { - "name": "aten::dict.Dict_str(Dict(str, t)(a) self) -> Dict(str, t)", - "inputs": [ - { "name": "self", "type": "Dict(string, t)" } - ], - "outputs": [ - { "type": "Dict(string, t)" } - ] + "name": "aten::column_stack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::dict.Tensor((Tensor, tVal)[] inputs) -> Dict(Tensor, tVal)", - "inputs": [ - { "name": "inputs", "type": "(Tensor, tVal)[]" } - ], - "outputs": [ - { "type": "Dict(Tensor, tVal)" } - ] + "name": "aten::complex(Tensor real, Tensor imag) -> Tensor" }, { - "name": "aten::dict.bool((bool, tVal)[] inputs) -> Dict(bool, tVal)", - "inputs": [ - { "name": "inputs", "type": "(boolean, tVal)[]" } - ], - "outputs": [ - { "type": "Dict(boolean, tVal)" } - ] + "name": "aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::dict.complex((complex, tVal)[] inputs) -> Dict(complex, tVal)", - "inputs": [ - { "name": "inputs", "type": "(complex, tVal)[]" } - ], - "outputs": [ - { "type": "Dict(complex, tVal)" } - ] + "name": "aten::concat(Tensor[] tensors, int dim=0) -> Tensor", + "category": "Tensor" }, { - "name": "aten::dict.float((float, tVal)[] inputs) -> Dict(float, tVal)", - "inputs": [ - { "name": "inputs", "type": "(float32, tVal)[]" } - ], - "outputs": [ - { "type": "Dict(float32, tVal)" } - ] + "name": "aten::concat.names(Tensor[] tensors, str dim) -> Tensor", + "category": "Tensor" }, { - "name": "aten::dict.int((int, tVal)[] inputs) -> Dict(int, tVal)", - "inputs": [ - { "name": "inputs", "type": "(int64, tVal)[]" } - ], - "outputs": [ - { "type": "Dict(int64, tVal)" } - ] + "name": "aten::concat.names_out(Tensor[] tensors, str dim, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::dict.str((str, tVal)[] inputs) -> Dict(str, tVal)", - "inputs": [ - { "name": "inputs", "type": "(string, tVal)[]" } - ], - "outputs": [ - { "type": "Dict(string, tVal)" } - ] + "name": "aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "n", "type": "int64", "default": 1 }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "prepend", "type": "Tensor?", "default": null }, - { "name": "append", "type": "Tensor?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::concatenate(Tensor[] tensors, int dim=0) -> Tensor" }, { - "name": "aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "n", "type": "int64", "default": 1 }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "prepend", "type": "Tensor?", "default": null }, - { "name": "append", "type": "Tensor?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::concatenate.names(Tensor[] tensors, str dim) -> Tensor" }, { - "name": "aten::dim(Tensor self) -> int", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::concatenate.names_out(Tensor[] tensors, str dim, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "p", "type": "Scalar", "default": 2 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::concatenate.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::div(Scalar a, Scalar b) -> float", - "inputs": [ - { "name": "a", "type": "Scalar" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::conj(Tensor(a) self) -> Tensor(a)" }, { - "name": "aten::div.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::constant_pad_nd(Tensor self, SymInt[] pad, Scalar value=0) -> Tensor", + "category": "Tensor" }, { - "name": "aten::div.Scalar_mode(Tensor self, Scalar other, *, str? 
rounding_mode) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" }, - { "name": "rounding_mode", "type": "string?", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::constant_pad_nd.out(Tensor self, SymInt[] pad, Scalar value=0, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" }, - { "name": "rounding_mode", "type": "string?", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::contiguous(Tensor(a) self, *, MemoryFormat memory_format=0) -> Tensor(a)" }, { - "name": "aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::conv1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=[1], SymInt[1] padding=[0], SymInt[1] dilation=[1], SymInt groups=1) -> Tensor", + "category": "Layer" }, { - "name": "aten::div.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::conv1d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=[1], str padding=\"valid\", SymInt[1] dilation=[1], SymInt groups=1) -> Tensor", + "category": "Layer" }, { - "name": "aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "rounding_mode", "type": "string?", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::conv2d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=[1, 1], SymInt[2] padding=[0, 0], SymInt[2] dilation=[1, 1], SymInt groups=1) -> Tensor", + "category": "Layer" }, { - "name": "aten::div.complex(complex a, complex b) -> complex", - "inputs": [ - { "name": "a", "type": "complex" }, - { "name": "b", "type": "complex" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::conv2d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=[1, 1], str padding=\"valid\", SymInt[2] dilation=[1, 1], SymInt groups=1) -> Tensor", + "category": "Layer" }, { - "name": "aten::div.float(float a, float b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::conv3d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=[1, 1, 1], SymInt[3] padding=[0, 0, 0], SymInt[3] dilation=[1, 1, 1], SymInt groups=1) -> Tensor", + "category": "Layer" }, { - "name": "aten::div.int(int a, int b) -> float", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::conv3d.padding(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=[1, 1, 1], str padding=\"valid\", SymInt[3] dilation=[1, 1, 1], SymInt groups=1) -> Tensor", + "category": "Layer" }, { - "name": "aten::div.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::conv_transpose1d(Tensor input, Tensor weight, Tensor? bias=None, SymInt[1] stride=[1], SymInt[1] padding=[0], SymInt[1] output_padding=[0], SymInt groups=1, SymInt[1] dilation=[1]) -> Tensor", + "category": "Layer" }, { - "name": "aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "rounding_mode", "type": "string?", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[2] stride=[1, 1], SymInt[2] padding=[0, 0], SymInt[2] output_padding=[0, 0], SymInt groups=1, SymInt[2] dilation=[1, 1]) -> Tensor", + "category": "Layer" }, { - "name": "aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::conv_transpose3d.input(Tensor input, Tensor weight, Tensor? bias=None, SymInt[3] stride=[1, 1, 1], SymInt[3] padding=[0, 0, 0], SymInt[3] output_padding=[0, 0, 0], SymInt groups=1, SymInt[3] dilation=[1, 1, 1]) -> Tensor", + "category": "Layer" }, { - "name": "aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" }, - { "name": "rounding_mode", "type": "string?", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::convolution(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor", + "category": "Layer" }, { - "name": "aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::convolution.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "rounding_mode", "type": "string?", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::convolution_backward(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor, Tensor, Tensor)" }, { - "name": "aten::divide.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::convolution_backward.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[]? bias_sizes, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))" }, { - "name": "aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" }, - { "name": "rounding_mode", "type": "string?", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::convolution_backward_overrideable(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask) -> (Tensor grad_input, Tensor grad_weight, Tensor grad_bias)" }, { - "name": "aten::divide.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::convolution_backward_overrideable.out(Tensor grad_output, Tensor input, Tensor weight, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))" }, { - "name": "aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "rounding_mode", "type": "string?", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::convolution_overrideable(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups) -> Tensor" }, { - "name": "aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::convolution_overrideable.out(Tensor input, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, bool transposed, SymInt[] output_padding, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "rounding_mode", "type": "string?", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::copy_(Tensor(a!) self, Tensor src, bool non_blocking=False) -> Tensor(a!)" }, { - "name": "aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::copy_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" }, - { "name": "rounding_mode", "type": "string?", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::copy_.float(Tensor(a!) self, float other) -> Tensor(a!)" }, { - "name": "aten::divide_.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::copy_.int(Tensor(a!) self, int other) -> Tensor(a!)" }, { - "name": "aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "rounding_mode", "type": "string?", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cos(Tensor self) -> Tensor" }, { - "name": "aten::divmod.float(float x, float y) -> (float, float)", - "inputs": [ - { "name": "x", "type": "float32" }, - { "name": "y", "type": "float32" } - ], - "outputs": [ - { "type": "float32" }, - { "type": "float32" } - ] + "name": "aten::cos.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::divmod.int(int x, int y) -> (int, int)", - "inputs": [ - { "name": "x", "type": "int64" }, - { "name": "y", "type": "int64" } - ], - "outputs": [ - { "type": "int64" }, - { "type": "int64" } - ] + "name": "aten::cos.complex(complex a) -> complex" }, { - "name": "aten::dot(Tensor self, Tensor tensor) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "tensor", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cos.float(float a) -> float" }, { - "name": "aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "tensor", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cos.int(int a) -> float" }, { - "name": "aten::dropout(Tensor input, float p, bool train) -> Tensor", - "category": "Dropout", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "p", "type": "float32", "default": 0.5 }, - { "name": "train", "type": "boolean", "visible": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cos.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", - "category": "Dropout", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "float32", "default": 0.5 }, - { "name": "train", "type": "boolean", "visible": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cosh(Tensor self) -> Tensor" }, { - "name": "aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor", - "inputs": [ - { "name": "equation", "type": "string" }, - { "name": "tensors", "type": "Tensor[]" }, - { "name": "path", "type": "int64[]?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cosh.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::einsum.sublist(Tensor a, ...) 
-> Tensor", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cosh.complex(complex a) -> complex" }, { - "name": "aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar", "default": 1 }, - { "name": "scale", "type": "Scalar", "default": 1 }, - { "name": "input_scale", "type": "Scalar", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cosh.float(float a) -> float" }, { - "name": "aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar", "default": 1 }, - { "name": "scale", "type": "Scalar", "default": 1 }, - { "name": "input_scale", "type": "Scalar", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cosh.int(int a) -> float" }, { - "name": "aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar", "default": 1 }, - { "name": "scale", "type": "Scalar", "default": 1 }, - { "name": "input_scale", "type": "Scalar", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cosh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor", - "category": "Transform", - "inputs": [ - { "name": "weight", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" }, - { "name": "padding_idx", "type": "SymInt", "default": -1 }, - { "name": "scale_grad_by_freq", "type": "boolean", "default": false }, - { "name": "sparse", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cosine_similarity(Tensor x1, Tensor x2, int dim=1, float eps=1e-08) -> Tensor" }, { - "name": "aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)", - "category": "Transform", - "inputs": [ - { "name": "weight", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" }, - { "name": "offsets", "type": "Tensor" }, - { "name": "scale_grad_by_freq", "type": "boolean", "default": false }, - { "name": "mode", "type": "int64", "default": 0 }, - { "name": "sparse", "type": "boolean", "default": false }, - { "name": "per_sample_weights", "type": "Tensor?", "default": null }, - { "name": "include_last_offset", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "output1", "type": "Tensor" }, - { "name": "output2", "type": "Tensor" }, - { "name": "output3", "type": "Tensor" }, - { "name": "output4", "type": "Tensor" } - ] + "name": "aten::count_nonzero(Tensor self, int? dim=None) -> Tensor" }, { - "name": "aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? 
padding_idx) -> (Tensor, Tensor, Tensor, Tensor)", - "inputs": [ - { "name": "weight", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" }, - { "name": "offsets", "type": "Tensor" }, - { "name": "scale_grad_by_freq", "type": "boolean" }, - { "name": "mode", "type": "int64" }, - { "name": "sparse", "type": "boolean" }, - { "name": "per_sample_weights", "type": "Tensor?" }, - { "name": "include_last_offset", "type": "boolean" }, - { "name": "padding_idx", "type": "int64?" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" }, - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::count_nonzero.dim_IntList(Tensor self, int[] dim) -> Tensor" }, { - "name": "aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" }, - { "name": "max_norm", "type": "float32" }, - { "name": "norm_type", "type": "float32" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::count_nonzero.dim_IntList_out(Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", - "inputs": [ - { "name": "size", "type": "SymInt[]" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::count_nonzero.out(Tensor self, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::empty.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", - "inputs": [ - { "name": "size", "type": "int64[]" }, - { "name": "names", "type": "Dimname[]?", "default": null, "kwarg_only": true }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cpu(Tensor(a) self) -> Tensor(a|b)" }, { - "name": "aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "size", "type": "SymInt[]" }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cross(Tensor self, Tensor other, int? dim=None) -> Tensor" }, { - "name": "aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cross.out(Tensor self, Tensor other, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::eq(Scalar a, Scalar b) -> bool", - "inputs": [ - { "name": "a", "type": "Scalar" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::cross_entropy_loss(Tensor self, Tensor target, Tensor? weight=None, int reduction=1, SymInt ignore_index=-100, float label_smoothing=0.) -> Tensor" }, { - "name": "aten::eq.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::ctc_loss.IntList(Tensor log_probs, Tensor targets, int[] input_lengths, int[] target_lengths, int blank=0, int reduction=1, bool zero_infinity=False) -> Tensor" }, { - "name": "aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::ctc_loss.Tensor(Tensor log_probs, Tensor targets, Tensor input_lengths, Tensor target_lengths, int blank=0, int reduction=1, bool zero_infinity=False) -> Tensor" }, { - "name": "aten::eq.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cudnn_convolution_add_relu(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor" }, { - "name": "aten::eq.Tensor_list(Tensor[] a, Tensor[] b) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor[]" }, - { "name": "b", "type": "Tensor[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::cudnn_convolution_add_relu.out(Tensor self, Tensor weight, Tensor z, Scalar? alpha, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::cudnn_convolution_relu(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups) -> Tensor" }, { - "name": "aten::eq.bool(bool a, bool b) -> bool", - "inputs": [ - { "name": "a", "type": "boolean" }, - { "name": "b", "type": "boolean" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::cudnn_convolution_relu.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] stride, SymInt[] padding, SymInt[] dilation, SymInt groups, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::eq.bool_list(bool[] a, bool[] b) -> bool", - "inputs": [ - { "name": "a", "type": "boolean[]" }, - { "name": "b", "type": "boolean[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::cummax(Tensor self, int dim) -> (Tensor values, Tensor indices)" }, { - "name": "aten::eq.complex(complex a, complex b) -> bool", - "inputs": [ - { "name": "a", "type": "complex" }, - { "name": "b", "type": "complex" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::cummax.dimname(Tensor self, str dim) -> (Tensor values, Tensor indices)" }, { - "name": "aten::eq.complex_float(complex a, float b) -> bool", - "inputs": [ - { "name": "a", "type": "complex" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::cummax.dimname_out(Tensor self, str dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)" }, { - "name": "aten::eq.device(Device a, Device b) -> bool", - "inputs": [ - { "name": "a", "type": "Device" }, - { "name": "b", "type": "Device" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::cummax.out(Tensor self, int dim, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)" }, { - "name": "aten::eq.enum(AnyEnumType a, AnyEnumType b) -> bool", - "inputs": [ - { "name": "a", "type": "AnyEnumType" }, - { "name": "b", "type": "AnyEnumType" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::cummaxmin_backward(Tensor grad, Tensor input, Tensor indices, int dim) -> Tensor" }, { - "name": "aten::eq.float(float a, float b) -> bool", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::cumsum(Tensor self, int dim, *, ScalarType? dtype=None) -> Tensor" }, { - "name": "aten::eq.float_complex(float a, complex b) -> bool", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "complex" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::cumsum.dimname(Tensor self, str dim, *, ScalarType? dtype=None) -> Tensor" }, { - "name": "aten::eq.float_int(float a, int b) -> bool", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::cumsum.dimname_out(Tensor self, str dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::eq.float_list(float[] a, float[] b) -> bool", - "inputs": [ - { "name": "a", "type": "float32[]" }, - { "name": "b", "type": "float32[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::cumsum.out(Tensor self, int dim, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::eq.int(int a, int b) -> bool", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::cumsum_(Tensor(a!) self, int dim, *, ScalarType? dtype=None) -> Tensor(a!)" }, { - "name": "aten::eq.int_float(int a, float b) -> bool", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::cumsum_.dimname(Tensor(a!) self, str dim, *, ScalarType? 
dtype=None) -> Tensor(a!)" }, { - "name": "aten::eq.int_list(int[] a, int[] b) -> bool", - "inputs": [ - { "name": "a", "type": "int64[]" }, - { "name": "b", "type": "int64[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::dequantize.any(Any tensors) -> Any", + "category": "Quantization" }, { - "name": "aten::eq.str(str a, str b) -> bool", - "inputs": [ - { "name": "a", "type": "string" }, - { "name": "b", "type": "string" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::dequantize.list(Tensor[] qtensors) -> Tensor[]", + "category": "Quantization" }, { - "name": "aten::eq.str_list(str[] a, str[] b) -> bool", - "inputs": [ - { "name": "a", "type": "string[]" }, - { "name": "b", "type": "string[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::dequantize.self(Tensor self) -> Tensor", + "category": "Quantization" }, { - "name": "aten::equal(Tensor self, Tensor other) -> bool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::dequantize.self_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::erf(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dequantize.tensor(Tensor qtensor) -> Tensor", + "category": "Quantization" }, { - "name": "aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dequantize.tensors(Tensor[] tensors) -> Tensor[]", + "category": "Quantization" }, { - "name": "aten::erfc(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dequantize.tensors_out(Tensor[] tensors, *, Tensor(a!)[] out) -> ()" }, { - "name": "aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::detach(Tensor(a) self) -> Tensor(a)" }, { - "name": "aten::exp(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::detach_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::detach_copy(Tensor self) -> Tensor" }, { - "name": "aten::exp_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::detach_copy.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "implicit", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::device(str a) -> Device" }, { - "name": "aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::device.with_index(str type, int index) -> Device" }, { - "name": "aten::expm1(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::diag(Tensor self, int diagonal=0) -> Tensor" }, { - "name": "aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::diag.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::expm1_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::diag_embed(Tensor self, int offset=0, int dim1=-2, int dim2=-1) -> Tensor" }, { - "name": "aten::exponential_(Tensor(a!) self, float lambd=1, *, Generator? generator=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "lambd", "type": "float32", "default": 1 }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::diag_embed.out(Tensor self, int offset=0, int dim1=-2, int dim2=-1, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::extend.t(t[](a!) self, t[] other) -> ()", - "inputs": [ - { "name": "self", "type": "t[]" }, - { "name": "other", "type": "t[]" } - ], - "outputs": [] + "name": "aten::diagflat(Tensor self, int offset=0) -> Tensor" }, { - "name": "aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "n", "type": "SymInt" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)" }, { - "name": "aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", - "inputs": [ - { "name": "n", "type": "SymInt" }, - { "name": "m", "type": "SymInt" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::diagonal.Dimname(Tensor(a) self, *, str outdim, str dim1, str dim2, int offset=0) -> Tensor(a)" }, { - "name": "aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "n", "type": "SymInt" }, - { "name": "m", "type": "SymInt" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::diagonal_backward(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2) -> Tensor" }, { - "name": "aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "n", "type": "SymInt" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::diagonal_backward.out(Tensor grad_output, SymInt[] input_sizes, int offset, int dim1, int dim2, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "scale", "type": "Tensor" }, - { "name": "zero_point", "type": "Tensor" }, - { "name": "axis", "type": "int64" }, - { "name": "quant_min", "type": "int64" }, - { "name": "quant_max", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::diagonal_copy(Tensor self, int offset=0, int dim1=0, int dim2=1) -> Tensor" }, { - "name": "aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "scale", "type": "float32" }, - { "name": "zero_point", "type": "int64" }, - { "name": "quant_min", "type": "int64" }, - { "name": "quant_max", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::diagonal_copy.out(Tensor self, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "scale", "type": "Tensor" }, - { "name": "zero_point", "type": "Tensor" }, - { "name": "quant_min", "type": "int64" }, - { "name": "quant_max", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::diagonal_scatter(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1) -> Tensor" }, { - "name": "aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "scale", "type": "float32" }, - { "name": "zero_point", "type": "int64" }, - { "name": "quant_min", "type": "int64" }, - { "name": "quant_max", "type": "int64" } - ], - "outputs": [ - { "name": "output", "type": "Tensor" }, - { "name": "mask", "type": "Tensor" } - ] + "name": "aten::diagonal_scatter.out(Tensor self, Tensor src, int offset=0, int dim1=0, int dim2=1, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor", - "inputs": [ - { "name": "grad", "type": "Tensor" }, - { "name": "mask", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dict() -> Dict(str, Tensor)" }, { - "name": "aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor", - "category": "Dropout", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "p", "type": "float32" }, - { "name": "train", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dict.Dict_Tensor(Dict(Tensor, t)(a) self) -> Dict(Tensor, t)" }, { - "name": "aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", - "category": "Dropout", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "float32" }, - { "name": "train", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dict.Dict_bool(Dict(bool, t)(a) self) -> Dict(bool, t)" }, { - "name": "aten::feature_dropout(Tensor input, float p, bool train) -> Tensor", - "category": "Dropout", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "p", "type": "float32" }, - { "name": "train", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dict.Dict_complex(Dict(complex, t)(a) self) -> Dict(complex, t)" }, { - "name": "aten::feature_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", - "category": "Dropout", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "float32" }, - { "name": "train", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dict.Dict_float(Dict(float, t)(a) self) -> Dict(float, t)" }, { - "name": "aten::fft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "signal_ndim", "type": "int64" }, - { "name": "normalized", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dict.Dict_int(Dict(int, t)(a) self) -> Dict(int, t)" }, { - "name": "aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "n", "type": "SymInt?", "default": null }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dict.Dict_str(Dict(str, t)(a) self) -> Dict(str, t)" }, { - "name": "aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "n", "type": "SymInt?", "default": null }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dict.Tensor((Tensor, tVal)[] inputs) -> Dict(Tensor, tVal)" }, { - "name": "aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? 
norm=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dict.bool((bool, tVal)[] inputs) -> Dict(bool, tVal)" }, { - "name": "aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dict.complex((complex, tVal)[] inputs) -> Dict(complex, tVal)" }, { - "name": "aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dict.float((float, tVal)[] inputs) -> Dict(float, tVal)" }, { - "name": "aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dict.int((int, tVal)[] inputs) -> Dict(int, tVal)" }, { - "name": "aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dict.str((str, tVal)[] inputs) -> Dict(str, tVal)" }, { - "name": "aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::diff(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None) -> Tensor" }, { - "name": "aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::diff.out(Tensor self, int n=1, int dim=-1, Tensor? prepend=None, Tensor? append=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? 
norm=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dim(Tensor self) -> int" }, { - "name": "aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dist(Tensor self, Tensor other, Scalar p=2) -> Tensor" }, { - "name": "aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "n", "type": "SymInt?", "default": null }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dist.out(Tensor self, Tensor other, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "n", "type": "SymInt?", "default": null }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::div(Scalar a, Scalar b) -> float" }, { - "name": "aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::div.Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor" }, { - "name": "aten::fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::div.Tensor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor" }, { - "name": "aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::div.complex(complex a, complex b) -> complex" }, { - "name": "aten::fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::div.float(float a, float b) -> float" }, { - "name": "aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::div.int(int a, int b) -> float" }, { - "name": "aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "n", "type": "SymInt?", "default": null }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "n", "type": "SymInt?", "default": null }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::div_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)" }, { - "name": "aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::div_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)" }, { - "name": "aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::div_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::div_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? rounding_mode) -> Tensor(a!)" }, { - "name": "aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "n", "type": "SymInt?", "default": null }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::divide.Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "n", "type": "SymInt?", "default": null }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::divide.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor" }, { - "name": "aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::divide.Tensor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2,-1], str? norm=None, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]", "default": [ -2, -1 ] }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::divide.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor" }, { - "name": "aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "s", "type": "SymInt[1]?", "default": null }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "norm", "type": "string?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::divide.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "value", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)" }, { - "name": "aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "value", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::divide_.Scalar_mode(Tensor(a!) self, Scalar other, *, str? rounding_mode) -> Tensor(a!)" }, { - "name": "aten::find(str self, str substr, int start=0, int end=-1) -> int", - "inputs": [ - { "name": "self", "type": "string" }, - { "name": "substr", "type": "string" }, - { "name": "start", "type": "int64", "default": 0 }, - { "name": "end", "type": "int64", "default": -1 } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::flatten.DimnameList(Tensor(a) self, Dimname[] dims, Dimname out_dim) -> Tensor(a)", - "category": "Shape", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dims", "type": "Dimname[]" }, - { "name": "out_dim", "type": "Dimname" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::divide_.Tensor_mode(Tensor(a!) self, Tensor other, *, str? 
rounding_mode) -> Tensor(a!)" }, { - "name": "aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, Dimname out_dim) -> Tensor(a)", - "category": "Shape", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "start_dim", "type": "int64" }, - { "name": "end_dim", "type": "int64" }, - { "name": "out_dim", "type": "Dimname" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::divmod.float(float x, float y) -> (float, float)" }, { - "name": "aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)", - "category": "Shape", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "start_dim", "type": "int64", "default": 0 }, - { "name": "end_dim", "type": "int64", "default": -1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::divmod.float_int(float x, int y) -> (float, float)" }, { - "name": "aten::flatten.using_names(Tensor(a) self, Dimname start_dim, Dimname end_dim, Dimname out_dim) -> Tensor(a)", - "category": "Shape", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "start_dim", "type": "Dimname" }, - { "name": "end_dim", "type": "Dimname" }, - { "name": "out_dim", "type": "Dimname" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::divmod.int(int x, int y) -> (int, int)" }, { - "name": "aten::flip(Tensor self, int[] dims) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dims", "type": "int64[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::divmod.int_float(int x, float y) -> (float, float)" }, { - "name": "aten::floor(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::dot(Tensor self, Tensor tensor) -> Tensor" }, { - "name": "aten::floor.Scalar(Scalar a) -> Scalar", - "inputs": [ - { "name": "a", "type": "Scalar" } - ], - "outputs": [ - { "type": "Scalar" } - ] + "name": "aten::dot.out(Tensor self, Tensor tensor, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::floor.float(float a) -> int", - "inputs": [ - { "name": "a", "type": "float32" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::dropout(Tensor input, float p, bool train) -> Tensor", + "category": "Dropout" }, { - "name": "aten::floor.int(int a) -> int", - "inputs": [ - { "name": "a", "type": "int64" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", + "category": "Dropout" }, { - "name": "aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::einsum(str equation, Tensor[] tensors, *, int[]? path=None) -> Tensor" }, { - "name": "aten::floor_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::einsum.sublist(Tensor a, ...) 
-> Tensor" }, { - "name": "aten::floor_divide(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::elu(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor", + "category": "Activation" }, { - "name": "aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::elu.out(Tensor self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::elu_(Tensor(a!) self, Scalar alpha=1, Scalar scale=1, Scalar input_scale=1) -> Tensor(a!)", + "category": "Activation" }, { - "name": "aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::embedding(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False) -> Tensor", + "category": "Transform" }, { - "name": "aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::embedding.out(Tensor weight, Tensor indices, SymInt padding_idx=-1, bool scale_grad_by_freq=False, bool sparse=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::embedding_bag(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq=False, int mode=0, bool sparse=False, Tensor? per_sample_weights=None, bool include_last_offset=False) -> (Tensor, Tensor, Tensor, Tensor)", + "category": "Transform" }, { - "name": "aten::floordiv(Scalar a, Scalar b) -> Scalar", - "inputs": [ - { "name": "a", "type": "Scalar" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "type": "Scalar" } - ] + "name": "aten::embedding_bag.padding_idx(Tensor weight, Tensor indices, Tensor offsets, bool scale_grad_by_freq, int mode, bool sparse, Tensor? per_sample_weights, bool include_last_offset, int? padding_idx) -> (Tensor, Tensor, Tensor, Tensor)" }, { - "name": "aten::floordiv.float(float a, float b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::embedding_renorm_(Tensor(a!) self, Tensor indices, float max_norm, float norm_type) -> Tensor(a!)" }, { - "name": "aten::floordiv.float_int(float a, int b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::empty.memory_format(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor" }, { - "name": "aten::floordiv.int(int a, int b) -> int", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::empty.names(int[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor" }, { - "name": "aten::floordiv.int_float(int a, float b) -> float", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::empty.names_out(int[] size, *, str[]? names, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::fmod(Scalar a, Scalar b) -> float", - "inputs": [ - { "name": "a", "type": "Scalar" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::empty.out(SymInt[] size, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::empty_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor" }, { - "name": "aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::empty_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eq(Scalar a, Scalar b) -> bool" }, { - "name": "aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eq.Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::fmod.float(float a, float b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::fmod.float_int(float a, int b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::eq.Tensor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::fmod.int(int a, int b) -> float", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::eq.Tensor_list(Tensor[] a, Tensor[] b) -> bool" }, { - "name": "aten::fmod.int_float(int a, float b) -> float", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::format(str self, ...) 
-> str", - "is_vararg": true, - "inputs": [ - { "name": "self", "type": "string" } - ], - "outputs": [ - { "type": "string" } - ] + "name": "aten::eq.bool(bool a, bool b) -> bool" }, { - "name": "aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor", - "category": "Normalization", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eq.bool_list(bool[] a, bool[] b) -> bool" }, { - "name": "aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eq.complex(complex a, complex b) -> bool" }, { - "name": "aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "size", "type": "SymInt[]" }, - { "name": "fill_value", "type": "Scalar" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eq.complex_float(complex a, float b) -> bool" }, { - "name": "aten::full.names(int[] size, Scalar fill_value, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "size", "type": "int64[]" }, - { "name": "fill_value", "type": "Scalar" }, - { "name": "names", "type": "Dimname[]?", "kwarg_only": true }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eq.device(Device a, Device b) -> bool" }, { - "name": "aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "size", "type": "SymInt[]" }, - { "name": "fill_value", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eq.enum(AnyEnumType a, AnyEnumType b) -> bool" }, { - "name": "aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "fill_value", "type": "Scalar" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eq.float(float a, float b) -> bool" }, { - "name": "aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "observer_on", "type": "Tensor" }, - { "name": "fake_quant_on", "type": "Tensor" }, - { "name": "running_min", "type": "Tensor" }, - { "name": "running_max", "type": "Tensor" }, - { "name": "scale", "type": "Tensor" }, - { "name": "zero_point", "type": "Tensor" }, - { "name": "averaging_const", "type": "float32" }, - { "name": "quant_min", "type": "int64" }, - { "name": "quant_max", "type": "int64" }, - { "name": "ch_axis", "type": "int64" }, - { "name": "per_row_fake_quant", "type": "boolean", "default": false }, - { "name": "symmetric_quant", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eq.float_complex(float a, complex b) -> bool" }, { - "name": "aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor", - "category": "Transform", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "sparse_grad", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eq.float_int(float a, int b) -> bool" }, { - "name": "aten::gather.dimname(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False) -> Tensor", - "category": "Transform", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "index", "type": "Tensor" }, - { "name": "sparse_grad", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eq.float_list(float[] a, float[] b) -> bool" }, { - "name": "aten::gather.dimname_out(Tensor self, Dimname dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)", - "category": "Transform", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "index", "type": "Tensor" }, - { "name": "sparse_grad", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eq.int(int a, int b) -> bool" }, { - "name": "aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) 
out) -> Tensor(a!)", - "category": "Transform", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "sparse_grad", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eq.int_float(int a, float b) -> bool" }, { - "name": "aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor", - "inputs": [ - { "name": "grad", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "sparse_grad", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eq.int_list(int[] a, int[] b) -> bool" }, { - "name": "aten::gcd(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eq.str(str a, str b) -> bool" }, { - "name": "aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eq.str_list(str[] a, str[] b) -> bool" }, { - "name": "aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::equal(Tensor self, Tensor other) -> bool" }, { - "name": "aten::ge.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::erf(Tensor self) -> Tensor" }, { - "name": "aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::erf.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::ge.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::erf.float(float a) -> float" }, { - "name": "aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::erf.int(int a) -> float" }, { - "name": "aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::erf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::ge_.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::erfc(Tensor self) -> Tensor" }, { - "name": "aten::gelu(Tensor self, *, str approximate='none') -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "approximate", "type": "string", "default": "none", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::erfc.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::gelu.out(Tensor self, *, str approximate='none', Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "approximate", "type": "string", "default": "none", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::erfc.float(float a) -> float" }, { - "name": "aten::gelu_(Tensor(a!) self, *, str approximate='none') -> Tensor(a!)", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "approximate", "type": "string", "default": "none", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::erfc.int(int a) -> float" }, { - "name": "aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate='none') -> Tensor", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "approximate", "type": "string", "default": "none", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::erfc.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate='none', Tensor(a!) grad_input) -> Tensor(a!)", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "approximate", "type": "string", "default": "none", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::exp(Tensor self) -> Tensor" }, { - "name": "aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "float32" }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::exp.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "name": "a", "type": "Tensor" }, - { "name": "tau", "type": "Tensor" } - ] + "name": "aten::exp.complex(complex a) -> complex" }, { - "name": "aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "name": "a", "type": "Tensor" }, - { "name": "tau", "type": "Tensor" } - ] + "name": "aten::exp.float(float a) -> float" }, { - "name": "aten::ger(Tensor self, Tensor vec2) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "vec2", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::exp.int(int a) -> float" }, { - "name": "aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "vec2", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::get.Tensor(Dict(Tensor, t) self, Tensor key) -> t(*)?", - "inputs": [ - { "name": "self", "type": "Dict(Tensor, t)" }, - { "name": "key", "type": "Tensor" } - ], - "outputs": [ - { "type": "t?" } - ] + "name": "aten::exp_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::get.bool(Dict(bool, t) self, bool key) -> t(*)?", - "inputs": [ - { "name": "self", "type": "Dict(boolean, t)" }, - { "name": "key", "type": "boolean" } - ], - "outputs": [ - { "type": "t?" } - ] + "name": "aten::expand(Tensor(a) self, SymInt[] size, *, bool implicit=False) -> Tensor(a)" }, { - "name": "aten::get.complex(Dict(complex, t) self, complex key) -> t(*)?", - "inputs": [ - { "name": "self", "type": "Dict(complex, t)" }, - { "name": "key", "type": "complex" } - ], - "outputs": [ - { "type": "t?" } - ] + "name": "aten::expand_as(Tensor(a) self, Tensor other) -> Tensor(a)" }, { - "name": "aten::get.default_Tensor(Dict(Tensor, t) self, Tensor key, t default_value) -> t(*)", - "inputs": [ - { "name": "self", "type": "Dict(Tensor, t)" }, - { "name": "key", "type": "Tensor" }, - { "name": "default_value", "type": "t" } - ], - "outputs": [ - { "type": "t" } - ] + "name": "aten::expm1(Tensor self) -> Tensor" }, { - "name": "aten::get.default_bool(Dict(bool, t) self, bool key, t default_value) -> t(*)", - "inputs": [ - { "name": "self", "type": "Dict(boolean, t)" }, - { "name": "key", "type": "boolean" }, - { "name": "default_value", "type": "t" } - ], - "outputs": [ - { "type": "t" } - ] + "name": "aten::expm1.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::get.default_complex(Dict(complex, t) self, complex key, t default_value) -> t(*)", - "inputs": [ - { "name": "self", "type": "Dict(complex, t)" }, - { "name": "key", "type": "complex" }, - { "name": "default_value", "type": "t" } - ], - "outputs": [ - { "type": "t" } - ] + "name": "aten::expm1.float(float a) -> float" }, { - "name": "aten::get.default_float(Dict(float, t) self, float key, t default_value) -> t(*)", - "inputs": [ - { "name": "self", "type": "Dict(float32, t)" }, - { "name": "key", "type": "float32" }, - { "name": "default_value", "type": "t" } - ], - "outputs": [ - { "type": "t" } - ] + "name": "aten::expm1.int(int a) -> float" }, { - "name": "aten::get.default_int(Dict(int, t) self, int key, t default_value) -> t(*)", - "inputs": [ - { "name": "self", "type": "Dict(int64, t)" }, - { "name": "key", "type": "int64" }, - { "name": "default_value", "type": "t" } - ], - "outputs": [ - { "type": "t" } - ] + "name": "aten::expm1.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::get.default_str(Dict(str, t) self, str key, t default_value) -> t(*)", - "inputs": [ - { "name": "self", "type": "Dict(string, t)" }, - { "name": "key", "type": "string" }, - { "name": "default_value", "type": "t" } - ], - "outputs": [ - { "type": "t" } - ] + "name": "aten::expm1_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::get.float(Dict(float, t) self, float key) -> t(*)?", - "inputs": [ - { "name": "self", "type": "Dict(float32, t)" }, - { "name": "key", "type": "float32" } - ], - "outputs": [ - { "type": "t?" } - ] + "name": "aten::exponential_(Tensor(a!) self, float lambd=1., *, Generator? 
generator=None) -> Tensor(a!)" }, { - "name": "aten::get.int(Dict(int, t) self, int key) -> t(*)?", - "inputs": [ - { "name": "self", "type": "Dict(int64, t)" }, - { "name": "key", "type": "int64" } - ], - "outputs": [ - { "type": "t?" } - ] + "name": "aten::extend.t(t[](a!) self, t[] other) -> ()" }, { - "name": "aten::get.str(Dict(str, t) self, str key) -> t(*)?", - "inputs": [ - { "name": "self", "type": "Dict(string, t)" }, - { "name": "key", "type": "string" } - ], - "outputs": [ - { "type": "t?" } - ] + "name": "aten::eye(SymInt n, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::get_device(Tensor self) -> int", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::eye.m(SymInt n, SymInt m, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::glu(Tensor self, int dim=-1) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64", "default": -1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eye.m_out(SymInt n, SymInt m, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64", "default": -1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::eye.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor" }, { - "name": "aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fake_quantize_per_tensor_affine(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> Tensor" }, { - "name": "aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fake_quantize_per_tensor_affine.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, int quant_min, int quant_max) -> Tensor" }, { - "name": "aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fake_quantize_per_tensor_affine_cachemask(Tensor self, float scale, int zero_point, int quant_min, int quant_max) -> (Tensor output, Tensor mask)" }, { - "name": "aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fake_quantize_per_tensor_affine_cachemask.out(Tensor self, float scale, int zero_point, int quant_min, int quant_max, *, Tensor(a!) out0, Tensor(b!) 
out1) -> (Tensor(a!), Tensor(b!))" }, { - "name": "aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fake_quantize_per_tensor_affine_cachemask_backward(Tensor grad, Tensor mask) -> Tensor" }, { - "name": "aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "grid", "type": "Tensor" }, - { "name": "interpolation_mode", "type": "int64" }, - { "name": "padding_mode", "type": "int64" }, - { "name": "align_corners", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::feature_alpha_dropout(Tensor input, float p, bool train) -> Tensor", + "category": "Dropout" }, { - "name": "aten::grid_sampler.legacy(Tensor input, Tensor grid, int interpolation_mode, int padding_mode) -> Tensor", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "grid", "type": "Tensor" }, - { "name": "interpolation_mode", "type": "int64" }, - { "name": "padding_mode", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::feature_alpha_dropout_(Tensor(a!) self, float p, bool train) -> Tensor(a!)", + "category": "Dropout" }, { - "name": "aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enabled=True) -> Tensor", - "category": "Normalization", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "num_groups", "type": "int64" }, - { "name": "weight", "type": "Tensor?", "default": null }, - { "name": "bias", "type": "Tensor?", "default": null }, - { "name": "eps", "type": "float32", "default": 1e-05 }, - { "name": "cudnn_enabled", "type": "boolean", "visible": true, "default": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::feature_dropout(Tensor input, float p, bool train) -> Tensor", + "category": "Dropout" }, { - "name": "aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "data", "type": "Tensor" }, - { "name": "batch_sizes", "type": "Tensor" }, - { "name": "hx", "type": "Tensor" }, - { "name": "params", "type": "Tensor[]" }, - { "name": "has_biases", "type": "boolean" }, - { "name": "num_layers", "type": "int64" }, - { "name": "dropout", "type": "float32" }, - { "name": "train", "type": "boolean" }, - { "name": "bidirectional", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" }, - { "name": "?", "type": "Tensor" } - ] + "name": "aten::feature_dropout_(Tensor(a!) 
self, float p, bool train) -> Tensor(a!)", + "category": "Dropout" }, { - "name": "aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "hx", "type": "Tensor" }, - { "name": "params", "type": "Tensor[]" }, - { "name": "has_biases", "type": "boolean" }, - { "name": "num_layers", "type": "int64" }, - { "name": "dropout", "type": "float32" }, - { "name": "train", "type": "boolean" }, - { "name": "bidirectional", "type": "boolean" }, - { "name": "batch_first", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" }, - { "name": "?", "type": "Tensor" } - ] + "name": "aten::fft(Tensor self, int signal_ndim, bool normalized=False) -> Tensor" }, { - "name": "aten::gt(Scalar a, Scalar b) -> bool", - "inputs": [ - { "name": "a", "type": "Scalar" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::fft_fft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor" }, { - "name": "aten::gt.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_fft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_fft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor" }, { - "name": "aten::gt.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_fft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_fftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor" }, { - "name": "aten::gt.float(float a, float b) -> bool", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::fft_fftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::gt.float_int(float a, int b) -> bool", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::fft_fftshift(Tensor self, int[1]? dim=None) -> Tensor" }, { - "name": "aten::gt.int(int a, int b) -> bool", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::fft_hfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? 
norm=None) -> Tensor" }, { - "name": "aten::gt.int_float(int a, float b) -> bool", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::fft_hfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::gt.str(str a, str b) -> bool", - "inputs": [ - { "name": "a", "type": "string" }, - { "name": "b", "type": "string" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::fft_hfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor" }, { - "name": "aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "window_length", "type": "int64" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_hfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "window_length", "type": "int64" }, - { "name": "periodic", "type": "boolean" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_ifft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor" }, { - "name": "aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "window_length", "type": "int64" }, - { "name": "periodic", "type": "boolean" }, - { "name": "alpha", "type": "float32" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_ifft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", - "inputs": [ - { "name": "window_length", "type": "int64" }, - { "name": "periodic", "type": "boolean" }, - { "name": "alpha", "type": "float32" }, - { "name": "beta", "type": "float32" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_ifft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor" }, { - "name": "aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "window_length", "type": "int64" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_ifft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "window_length", "type": "int64" }, - { "name": "periodic", "type": "boolean" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_ifftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor" }, { - "name": "aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "lambd", "type": "Scalar", "default": 0.5 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_ifftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "lambd", "type": "Scalar", "default": 0.5 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_ifftshift(Tensor self, int[1]? dim=None) -> Tensor" }, { - "name": "aten::hardsigmoid(Tensor self) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_ihfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor" }, { - "name": "aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_ihfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::hardsigmoid_(Tensor(a!) 
self) -> Tensor(a!)", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_ihfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor" }, { - "name": "aten::hardswish(Tensor self) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_ihfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_irfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor" }, { - "name": "aten::hardswish_(Tensor(a!) self) -> Tensor(a!)", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_irfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_irfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor" }, { - "name": "aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min_val", "type": "Scalar", "default": -1 }, - { "name": "max_val", "type": "Scalar", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_irfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min_val", "type": "Scalar", "default": -1 }, - { "name": "max_val", "type": "Scalar", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_irfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor" }, { - "name": "aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "min_val", "type": "Scalar", "default": -1 }, - { "name": "max_val", "type": "Scalar", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_irfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "min_val", "type": "Scalar" }, - { "name": "max_val", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_rfft(Tensor self, SymInt? n=None, int dim=-1, str? norm=None) -> Tensor" }, { - "name": "aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) 
grad_input) -> Tensor(a!)", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "min_val", "type": "Scalar" }, - { "name": "max_val", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_rfft.out(Tensor self, SymInt? n=None, int dim=-1, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "bins", "type": "int64", "default": 100 }, - { "name": "min", "type": "Scalar", "default": 0 }, - { "name": "max", "type": "Scalar", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_rfft2(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None) -> Tensor" }, { - "name": "aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "bins", "type": "int64", "default": 100 }, - { "name": "min", "type": "Scalar", "default": 0 }, - { "name": "max", "type": "Scalar", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_rfft2.out(Tensor self, SymInt[1]? s=None, int[1] dim=[-2, -1], str? norm=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::hstack(Tensor[] tensors) -> Tensor", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_rfftn(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None) -> Tensor" }, { - "name": "aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fft_rfftn.out(Tensor self, SymInt[1]? s=None, int[1]? dim=None, str? norm=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::huber_loss(Tensor self, Tensor target, int reduction=Mean, float delta=1.0) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "reduction", "type": "int64", "default": "Mean" }, - { "name": "delta", "type": "float32", "default": 1.0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fill_.Scalar(Tensor(a!) self, Scalar value) -> Tensor(a!)" }, { - "name": "aten::huber_loss.out(Tensor self, Tensor target, int reduction=Mean, float delta=1.0, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "reduction", "type": "int64", "default": "Mean" }, - { "name": "delta", "type": "float32", "default": 1.0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fill_.Tensor(Tensor(a!) self, Tensor value) -> Tensor(a!)" }, { - "name": "aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "reduction", "type": "int64" }, - { "name": "delta", "type": "float32" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::find(str self, str substr, int start=0, int end=-1) -> int" }, { - "name": "aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) 
grad_input) -> Tensor(a!)", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "reduction", "type": "int64" }, - { "name": "delta", "type": "float32" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::flatten.DimnameList(Tensor(a) self, str[] dims, str out_dim) -> Tensor(a)", + "category": "Shape" }, { - "name": "aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[2]" }, - { "name": "dilation", "type": "int64[2]" }, - { "name": "padding", "type": "int64[2]" }, - { "name": "stride", "type": "int64[2]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::flatten.named_out_dim(Tensor(a) self, int start_dim, int end_dim, str out_dim) -> Tensor(a)", + "category": "Shape" }, { - "name": "aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[2]" }, - { "name": "dilation", "type": "int64[2]" }, - { "name": "padding", "type": "int64[2]" }, - { "name": "stride", "type": "int64[2]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::flatten.using_ints(Tensor(a) self, int start_dim=0, int end_dim=-1) -> Tensor(a)", + "category": "Shape" }, { - "name": "aten::imag(Tensor(a) self) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::flatten.using_names(Tensor(a) self, str start_dim, str end_dim, str out_dim) -> Tensor(a)", + "category": "Shape" }, { - "name": "aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "indices", "type": "Tensor?[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::flip(Tensor self, int[] dims) -> Tensor" }, { - "name": "aten::index.Tensor_hacked_twin(Tensor self, Tensor[] indices) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "indices", "type": "Tensor[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "indices", "type": "Tensor?[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::floor(Tensor self) -> Tensor" }, { - "name": "aten::index.str(str self, str substr, int start=0, int end=-1) -> int", - "inputs": [ - { "name": "self", "type": "string" }, - { "name": "substr", "type": "string" }, - { "name": "start", "type": "int64", "default": 0 }, - { "name": "end", "type": "int64", "default": -1 } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::floor.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "source", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::floor.float(float a) -> int" }, { - "name": "aten::index_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "index", "type": "Tensor" }, - { "name": "source", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::floor.int(int a) -> int" }, { - "name": "aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "source", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "source", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::floor_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "source", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::floor_divide(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::index_copy.dimname(Tensor self, Dimname dim, Tensor index, Tensor source) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "index", "type": "Tensor" }, - { "name": "source", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "source", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "source", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::index_copy_.dimname(Tensor(a!) self, Dimname dim, Tensor index, Tensor source) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "index", "type": "Tensor" }, - { "name": "source", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)" }, { - "name": "aten::index_fill.Dimname_Scalar(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "index", "type": "Tensor" }, - { "name": "value", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::index_fill.Dimname_Tensor(Tensor self, Dimname dim, Tensor index, Tensor value) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "index", "type": "Tensor" }, - { "name": "value", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::floordiv(Scalar a, Scalar b) -> Scalar" }, { - "name": "aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "value", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::floordiv.float(float a, float b) -> float" }, { - "name": "aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "value", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::floordiv.float_int(float a, int b) -> float" }, { - "name": "aten::index_fill_.Dimname_Scalar(Tensor(a!) self, Dimname dim, Tensor index, Scalar value) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "index", "type": "Tensor" }, - { "name": "value", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::floordiv.int(int a, int b) -> int" }, { - "name": "aten::index_fill_.Dimname_Tensor(Tensor(a!) 
self, Dimname dim, Tensor index, Tensor value) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "index", "type": "Tensor" }, - { "name": "value", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::floordiv.int_float(int a, float b) -> float" }, { - "name": "aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "value", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fmod(Scalar a, Scalar b) -> float" }, { - "name": "aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "value", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "indices", "type": "Tensor?[]" }, - { "name": "values", "type": "Tensor" }, - { "name": "accumulate", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::index_put.hacked_twin(Tensor self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "indices", "type": "Tensor[]" }, - { "name": "values", "type": "Tensor" }, - { "name": "accumulate", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "indices", "type": "Tensor?[]" }, - { "name": "values", "type": "Tensor" }, - { "name": "accumulate", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::index_put_.hacked_twin(Tensor(a!) 
self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "indices", "type": "Tensor[]" }, - { "name": "values", "type": "Tensor" }, - { "name": "accumulate", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fmod.float(float a, float b) -> float" }, { - "name": "aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "source", "type": "Tensor" }, - { "name": "reduce", "type": "string" }, - { "name": "include_self", "type": "boolean", "default": true, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fmod.float_int(float a, int b) -> float" }, { - "name": "aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "source", "type": "Tensor" }, - { "name": "reduce", "type": "string" }, - { "name": "include_self", "type": "boolean", "default": true, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fmod.int(int a, int b) -> float" }, { - "name": "aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "source", "type": "Tensor" }, - { "name": "reduce", "type": "string" }, - { "name": "include_self", "type": "boolean", "default": true, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fmod.int_float(int a, float b) -> float" }, { - "name": "aten::index_select(Tensor self, int dim, Tensor index) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::format(str self, ...) -> str", + "is_vararg": true }, { - "name": "aten::index_select.dimname(Tensor self, Dimname dim, Tensor index) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "index", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::frobenius_norm.dim(Tensor self, int[1] dim, bool keepdim=False) -> Tensor", + "category": "Normalization" }, { - "name": "aten::index_select.dimname_out(Tensor self, Dimname dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "index", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::frobenius_norm.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::full(SymInt[] size, Scalar fill_value, *, ScalarType? 
dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor", - "inputs": [ - { "name": "grad", "type": "Tensor" }, - { "name": "self_sizes", "type": "SymInt[]" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::full.names(int[] size, Scalar fill_value, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor", - "category": "Normalization", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "running_mean", "type": "Tensor?" }, - { "name": "running_var", "type": "Tensor?" }, - { "name": "use_input_stats", "type": "boolean" }, - { "name": "momentum", "type": "float32" }, - { "name": "eps", "type": "float32" }, - { "name": "cudnn_enabled", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::full.names_out(int[] size, Scalar fill_value, *, str[]? names, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::int_repr(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::full.out(SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor" }, { - "name": "aten::inverse(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::fused_moving_avg_obs_fake_quant(Tensor self, Tensor observer_on, Tensor fake_quant_on, Tensor(a!) running_min, Tensor(b!) running_max, Tensor(c!) scale, Tensor(d!) 
zero_point, float averaging_const, int quant_min, int quant_max, int ch_axis, bool per_row_fake_quant=False, bool symmetric_quant=False) -> Tensor" }, { - "name": "aten::is_contiguous(Tensor self) -> bool", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::gather(Tensor self, int dim, Tensor index, *, bool sparse_grad=False) -> Tensor", + "category": "Transform" }, { - "name": "aten::is_contiguous.memory_format(Tensor self, MemoryFormat memory_format) -> bool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "memory_format", "type": "MemoryFormat" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::gather.dimname(Tensor self, str dim, Tensor index, *, bool sparse_grad=False) -> Tensor", + "category": "Transform" }, { - "name": "aten::is_floating_point(Tensor self) -> bool", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::gather.dimname_out(Tensor self, str dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)", + "category": "Transform" }, { - "name": "aten::isfinite(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::gather.out(Tensor self, int dim, Tensor index, *, bool sparse_grad=False, Tensor(a!) out) -> Tensor(a!)", + "category": "Transform" }, { - "name": "aten::isinf(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::gather_backward(Tensor grad, Tensor self, int dim, Tensor index, bool sparse_grad) -> Tensor" }, { - "name": "aten::isnan(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::gcd(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? length=None, bool return_complex=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "n_fft", "type": "int64" }, - { "name": "hop_length", "type": "int64?", "default": null }, - { "name": "win_length", "type": "int64?", "default": null }, - { "name": "window", "type": "Tensor?", "default": null }, - { "name": "center", "type": "boolean", "default": true }, - { "name": "normalized", "type": "boolean", "default": false }, - { "name": "onesided", "type": "boolean?", "default": null }, - { "name": "length", "type": "int64?", "default": null }, - { "name": "return_complex", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::gcd.int(int a, int b) -> int" }, { - "name": "aten::item(Tensor self) -> Scalar", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Scalar" } - ] + "name": "aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::items.Tensor(Dict(Tensor, t) self) -> ((Tensor, t)[])", - "inputs": [ - { "name": "self", "type": "Dict(Tensor, t)" } - ], - "outputs": [ - { "type": "(Tensor, t)[]" } - ] + "name": "aten::gcd_(Tensor(a!) 
self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::items.bool(Dict(bool, t) self) -> ((bool, t)[])", - "inputs": [ - { "name": "self", "type": "Dict(boolean, t)" } - ], - "outputs": [ - { "type": "(boolean, t)[]" } - ] + "name": "aten::ge(Scalar a, Scalar b) -> bool" }, { - "name": "aten::items.complex(Dict(complex, t) self) -> ((complex, t)[])", - "inputs": [ - { "name": "self", "type": "Dict(complex, t)" } - ], - "outputs": [ - { "type": "(complex, t)[]" } - ] + "name": "aten::ge.Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::items.float(Dict(float, t) self) -> ((float, t)[])", - "inputs": [ - { "name": "self", "type": "Dict(float32, t)" } - ], - "outputs": [ - { "type": "(float32, t)[]" } - ] + "name": "aten::ge.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::items.int(Dict(int, t) self) -> ((int, t)[])", - "inputs": [ - { "name": "self", "type": "Dict(int64, t)" } - ], - "outputs": [ - { "type": "(int64, t)[]" } - ] + "name": "aten::ge.Tensor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::items.str(Dict(str, t) self) -> ((str, t)[])", - "inputs": [ - { "name": "self", "type": "Dict(string, t)" } - ], - "outputs": [ - { "type": "(string, t)[]" } - ] + "name": "aten::ge.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::join(str self, str[] values) -> str", - "inputs": [ - { "name": "self", "type": "string" }, - { "name": "values", "type": "string[]" } - ], - "outputs": [ - { "type": "string" } - ] + "name": "aten::ge.float(float a, float b) -> bool" }, { - "name": "aten::keys.Tensor(Dict(Tensor, t) self) -> Tensor[](*)", - "inputs": [ - { "name": "self", "type": "Dict(Tensor, t)" } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::ge.float_int(float a, int b) -> bool" }, { - "name": "aten::keys.bool(Dict(bool, t) self) -> bool[](*)", - "inputs": [ - { "name": "self", "type": "Dict(boolean, t)" } - ], - "outputs": [ - { "type": "boolean[]" } - ] + "name": "aten::ge.int(int a, int b) -> bool" }, { - "name": "aten::keys.complex(Dict(complex, t) self) -> complex[](*)", - "inputs": [ - { "name": "self", "type": "Dict(complex, t)" } - ], - "outputs": [ - { "type": "complex[]" } - ] + "name": "aten::ge.int_float(int a, float b) -> bool" }, { - "name": "aten::keys.float(Dict(float, t) self) -> float[](*)", - "inputs": [ - { "name": "self", "type": "Dict(float32, t)" } - ], - "outputs": [ - { "type": "float32[]" } - ] + "name": "aten::ge.str(str a, str b) -> bool" }, { - "name": "aten::keys.int(Dict(int, t) self) -> int[](*)", - "inputs": [ - { "name": "self", "type": "Dict(int64, t)" } - ], - "outputs": [ - { "type": "int64[]" } - ] + "name": "aten::ge_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)" }, { - "name": "aten::keys.str(Dict(str, t) self) -> str[](*)", - "inputs": [ - { "name": "self", "type": "Dict(string, t)" } - ], - "outputs": [ - { "type": "string[]" } - ] + "name": "aten::ge_.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "reduction", "type": "int64", "default": "Mean" }, - { "name": "log_target", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::gelu(Tensor self, *, str approximate=\"none\") -> Tensor", + "category": "Activation" }, { - "name": "aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "k", "type": "int64" }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::gelu.out(Tensor self, *, str approximate=\"none\", Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::kthvalue.dimname(Tensor self, int k, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "k", "type": "int64" }, - { "name": "dim", "type": "Dimname" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::gelu_(Tensor(a!) self, *, str approximate=\"none\") -> Tensor(a!)", + "category": "Activation" }, { - "name": "aten::kthvalue.dimname_out(Tensor self, int k, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "k", "type": "int64" }, - { "name": "dim", "type": "Dimname" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::gelu_backward(Tensor grad_output, Tensor self, *, str approximate=\"none\") -> Tensor" }, { - "name": "aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "k", "type": "int64" }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::gelu_backward.grad_input(Tensor grad_output, Tensor self, *, str approximate=\"none\", Tensor(a!) grad_input) -> Tensor(a!)" }, { - "name": "aten::l1_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "reduction", "type": "int64", "default": "Mean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::geometric_(Tensor(a!) self, float p, *, Generator? generator=None) -> Tensor(a!)" }, { - "name": "aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? 
bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor", - "category": "Normalization", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "normalized_shape", "type": "SymInt[]" }, - { "name": "weight", "type": "Tensor?", "default": null }, - { "name": "bias", "type": "Tensor?", "default": null }, - { "name": "eps", "type": "float32", "default": 1e-05 }, - { "name": "cudnn_enable", "type": "boolean", "default": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::geqrf(Tensor self) -> (Tensor a, Tensor tau)" }, { - "name": "aten::le(Scalar a, Scalar b) -> bool", - "inputs": [ - { "name": "a", "type": "Scalar" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::geqrf.a(Tensor self, *, Tensor(a!) a, Tensor(b!) tau) -> (Tensor(a!) a, Tensor(b!) tau)" }, { - "name": "aten::le.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::ger(Tensor self, Tensor vec2) -> Tensor" }, { - "name": "aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::le.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::get.Tensor(Dict(Tensor, t) self, Tensor key) -> t(*)?" }, { - "name": "aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::get.bool(Dict(bool, t) self, bool key) -> t(*)?" }, { - "name": "aten::le.float(float a, float b) -> bool", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::get.complex(Dict(complex, t) self, complex key) -> t(*)?" 
}, { - "name": "aten::le.float_int(float a, int b) -> bool", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::get.default_Tensor(Dict(Tensor, t) self, Tensor key, t default_value) -> t(*)" }, { - "name": "aten::le.int(int a, int b) -> bool", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::get.default_bool(Dict(bool, t) self, bool key, t default_value) -> t(*)" }, { - "name": "aten::le.int_float(int a, float b) -> bool", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::get.default_complex(Dict(complex, t) self, complex key, t default_value) -> t(*)" }, { - "name": "aten::le.str(str a, str b) -> bool", - "inputs": [ - { "name": "a", "type": "string" }, - { "name": "b", "type": "string" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::get.default_float(Dict(float, t) self, float key, t default_value) -> t(*)" }, { - "name": "aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "negative_slope", "type": "Scalar", "default": 0.01 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::get.default_int(Dict(int, t) self, int key, t default_value) -> t(*)" }, { - "name": "aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "negative_slope", "type": "Scalar", "default": 0.01 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::get.default_str(Dict(str, t) self, str key, t default_value) -> t(*)" }, { - "name": "aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "negative_slope", "type": "Scalar", "default": 0.01 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::get.float(Dict(float, t) self, float key) -> t(*)?" }, { - "name": "aten::len.Dict_Tensor(Dict(Tensor, t) self) -> int", - "inputs": [ - { "name": "self", "type": "Dict(Tensor, t)" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::get.int(Dict(int, t) self, int key) -> t(*)?" }, { - "name": "aten::len.Dict_bool(Dict(bool, t) self) -> int", - "inputs": [ - { "name": "self", "type": "Dict(boolean, t)" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::get.str(Dict(str, t) self, str key) -> t(*)?" }, { - "name": "aten::len.Dict_complex(Dict(complex, t) self) -> int", - "inputs": [ - { "name": "self", "type": "Dict(complex, t)" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::get_device(Tensor self) -> int" }, { - "name": "aten::len.Dict_float(Dict(float, t) self) -> int", - "inputs": [ - { "name": "self", "type": "Dict(float32, t)" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::glu(Tensor self, int dim=-1) -> Tensor", + "category": "Activation" }, { - "name": "aten::len.Dict_int(Dict(int, t) self) -> int", - "inputs": [ - { "name": "self", "type": "Dict(int64, t)" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::glu.out(Tensor self, int dim=-1, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::len.Dict_str(Dict(str, t) self) -> int", - "inputs": [ - { "name": "self", "type": "Dict(string, t)" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::greater_equal.Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::len.Tensor(Tensor t) -> int", - "inputs": [ - { "name": "t", "type": "Tensor" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::greater_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::len.any(Any[] a) -> int", - "inputs": [ - { "name": "a", "type": "Any[]" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::greater_equal.Tensor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::len.str(str s) -> int", - "inputs": [ - { "name": "s", "type": "string" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::greater_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::len.t(t[] a) -> int", - "inputs": [ - { "name": "a", "type": "t[]" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::greater_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)" }, { - "name": "aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "end", "type": "Tensor" }, - { "name": "weight", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::greater_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "end", "type": "Tensor" }, - { "name": "weight", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::grid_sampler(Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> Tensor" }, { - "name": "aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "end", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::grid_sampler.legacy(Tensor input, Tensor grid, int interpolation_mode, int padding_mode) -> Tensor" }, { - "name": "aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "end", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::group_norm(Tensor input, int num_groups, Tensor? weight=None, Tensor? 
bias=None, float eps=1.0000000000000001e-05, bool cudnn_enabled=True) -> Tensor", + "category": "Normalization" }, { - "name": "aten::lift_fresh(Tensor(a) self) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::gru.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", + "category": "Layer" }, { - "name": "aten::lift_fresh_copy(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::gru.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", + "category": "Layer" }, { - "name": "aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "dim", "type": "int64", "default": -1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::gt(Scalar a, Scalar b) -> bool" }, { - "name": "aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "dim", "type": "int64", "default": -1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::gt.Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::linalg_inv(Tensor A) -> Tensor", - "inputs": [ - { "name": "A", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "A", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::gt.Tensor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)", - "inputs": [ - { "name": "A", "type": "Tensor" }, - { "name": "check_errors", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "name": "inverse", "type": "Tensor" }, - { "name": "info", "type": "Tensor" } - ] + "name": "aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)", - "inputs": [ - { "name": "A", "type": "Tensor" }, - { "name": "check_errors", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "name": "inverse", "type": "Tensor" }, - { "name": "info", "type": "Tensor" } - ] + "name": "aten::gt.float(float a, float b) -> bool" }, { - "name": "aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "ord", "type": "Scalar?", "default": null }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::gt.float_int(float a, int b) -> bool" }, { - "name": "aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "ord", "type": "string" }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::gt.int(int a, int b) -> bool" }, { - "name": "aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "ord", "type": "string" }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::gt.int_float(int a, float b) -> bool" }, { - "name": "aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "ord", "type": "Scalar?", "default": null }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::gt.str(str a, str b) -> bool" }, { - "name": "aten::linalg_qr(Tensor A, str mode='reduced') -> (Tensor Q, Tensor R)", - "inputs": [ - { "name": "A", "type": "Tensor" }, - { "name": "mode", "type": "string", "default": "reduced" } - ], - "outputs": [ - { "name": "Q", "type": "Tensor" }, - { "name": "R", "type": "Tensor" } - ] + "name": "aten::hamming_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::linalg_qr.out(Tensor A, str mode='reduced', *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)", - "inputs": [ - { "name": "A", "type": "Tensor" }, - { "name": "mode", "type": "string", "default": "reduced" } - ], - "outputs": [ - { "name": "Q", "type": "Tensor" }, - { "name": "R", "type": "Tensor" } - ] + "name": "aten::hamming_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor", - "inputs": [ - { "name": "A", "type": "Tensor" }, - { "name": "B", "type": "Tensor" }, - { "name": "left", "type": "boolean", "default": true, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hamming_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "A", "type": "Tensor" }, - { "name": "B", "type": "Tensor" }, - { "name": "left", "type": "boolean", "default": true, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hamming_window.periodic_alpha(int window_length, bool periodic, float alpha, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)", - "inputs": [ - { "name": "A", "type": "Tensor" }, - { "name": "B", "type": "Tensor" }, - { "name": "left", "type": "boolean", "default": true, "kwarg_only": true }, - { "name": "check_errors", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "name": "result", "type": "Tensor" }, - { "name": "info", "type": "Tensor" } - ] + "name": "aten::hamming_window.periodic_alpha_beta(int window_length, bool periodic, float alpha, float beta, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)", - "inputs": [ - { "name": "A", "type": "Tensor" }, - { "name": "B", "type": "Tensor" }, - { "name": "left", "type": "boolean", "default": true, "kwarg_only": true }, - { "name": "check_errors", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "name": "result", "type": "Tensor" }, - { "name": "info", "type": "Tensor" } - ] + "name": "aten::hamming_window.periodic_alpha_beta_out(int window_length, bool periodic, float alpha, float beta, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "B", "type": "Tensor" }, - { "name": "upper", "type": "boolean", "kwarg_only": true }, - { "name": "left", "type": "boolean", "default": true, "kwarg_only": true }, - { "name": "unitriangular", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hamming_window.periodic_alpha_out(int window_length, bool periodic, float alpha, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "B", "type": "Tensor" }, - { "name": "upper", "type": "boolean", "kwarg_only": true }, - { "name": "left", "type": "boolean", "default": true, "kwarg_only": true }, - { "name": "unitriangular", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hamming_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? 
driver=None) -> (Tensor U, Tensor S, Tensor Vh)", - "inputs": [ - { "name": "A", "type": "Tensor" }, - { "name": "full_matrices", "type": "boolean", "default": true }, - { "name": "driver", "type": "string?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "name": "U", "type": "Tensor" }, - { "name": "S", "type": "Tensor" }, - { "name": "Vh", "type": "Tensor" } - ] + "name": "aten::hann_window(int window_length, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)", - "inputs": [ - { "name": "A", "type": "Tensor" }, - { "name": "full_matrices", "type": "boolean", "default": true }, - { "name": "driver", "type": "string?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "name": "U", "type": "Tensor" }, - { "name": "S", "type": "Tensor" }, - { "name": "Vh", "type": "Tensor" } - ] + "name": "aten::hann_window.out(int window_length, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "ind", "type": "int64", "default": 2 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hann_window.periodic(int window_length, bool periodic, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "ind", "type": "int64", "default": 2 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hann_window.periodic_out(int window_length, bool periodic, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "dims", "type": "int64[]?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hardshrink(Tensor self, Scalar lambd=0.5) -> Tensor" }, { - "name": "aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "dims", "type": "int64[]?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hardshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "ord", "type": "Scalar", "default": 2 }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hardsigmoid(Tensor self) -> Tensor", + "category": "Activation" }, { - "name": "aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "ord", "type": "Scalar", "default": 2 }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hardsigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hardsigmoid_(Tensor(a!) self) -> Tensor(a!)", + "category": "Activation" }, { - "name": "aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hardswish(Tensor self) -> Tensor", + "category": "Activation" }, { - "name": "aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "grad_output", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "output_mask", "type": "boolean[3]" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::hardswish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "start", "type": "Scalar" }, - { "name": "end", "type": "Scalar" }, - { "name": "steps", "type": "int64", "default": null }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hardswish_(Tensor(a!) self) -> Tensor(a!)", + "category": "Activation" }, { - "name": "aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "start", "type": "Scalar" }, - { "name": "end", "type": "Tensor" }, - { "name": "steps", "type": "int64" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hardswish_backward(Tensor grad_output, Tensor self) -> Tensor" }, { - "name": "aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "start", "type": "Scalar" }, - { "name": "end", "type": "Tensor" }, - { "name": "steps", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hardswish_backward.out(Tensor grad_output, Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "start", "type": "Tensor" }, - { "name": "end", "type": "Scalar" }, - { "name": "steps", "type": "int64" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hardtanh(Tensor self, Scalar min_val=-1, Scalar max_val=1) -> Tensor", + "category": "Activation" }, { - "name": "aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "start", "type": "Tensor" }, - { "name": "end", "type": "Scalar" }, - { "name": "steps", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hardtanh.out(Tensor self, Scalar min_val=-1, Scalar max_val=1, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "start", "type": "Tensor" }, - { "name": "end", "type": "Tensor" }, - { "name": "steps", "type": "int64" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hardtanh_(Tensor(a!) self, Scalar min_val=-1, Scalar max_val=1) -> Tensor(a!)", + "category": "Activation" }, { - "name": "aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "start", "type": "Tensor" }, - { "name": "end", "type": "Tensor" }, - { "name": "steps", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hardtanh_backward(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val) -> Tensor" }, { - "name": "aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "start", "type": "Scalar" }, - { "name": "end", "type": "Scalar" }, - { "name": "steps", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hardtanh_backward.grad_input(Tensor grad_output, Tensor self, Scalar min_val, Scalar max_val, *, Tensor(a!) 
grad_input) -> Tensor(a!)" }, { - "name": "aten::list(str t) -> str[]", - "inputs": [ - { "name": "t", "type": "string" } - ], - "outputs": [ - { "type": "string[]" } - ] + "name": "aten::histc(Tensor self, int bins=100, Scalar min=0, Scalar max=0) -> Tensor" }, { - "name": "aten::list.t(t[] l) -> t[]", - "inputs": [ - { "name": "l", "type": "t[]" } - ], - "outputs": [ - { "type": "t[]" } - ] + "name": "aten::histc.out(Tensor self, int bins=100, Scalar min=0, Scalar max=0, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::log(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hstack(Tensor[] tensors) -> Tensor" }, { - "name": "aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::hstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::log10(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::huber_loss(Tensor self, Tensor target, int reduction=1, float delta=1.) -> Tensor" }, { - "name": "aten::log10.Scalar(Scalar a) -> Scalar", - "inputs": [ - { "name": "a", "type": "Scalar" } - ], - "outputs": [ - { "type": "Scalar" } - ] + "name": "aten::huber_loss.out(Tensor self, Tensor target, int reduction=1, float delta=1., *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::log10.complex(complex a) -> complex", - "inputs": [ - { "name": "a", "type": "complex" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::huber_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta) -> Tensor" }, { - "name": "aten::log10.float(float a) -> float", - "inputs": [ - { "name": "a", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::huber_loss_backward.out(Tensor grad_output, Tensor self, Tensor target, int reduction, float delta, *, Tensor(a!) grad_input) -> Tensor(a!)" }, { - "name": "aten::log10.int(int a) -> float", - "inputs": [ - { "name": "a", "type": "int64" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor" }, { - "name": "aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::log10_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::imag(Tensor(a) self) -> Tensor(a)" }, { - "name": "aten::log1p(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index.Tensor(Tensor self, Tensor?[] indices) -> Tensor" }, { - "name": "aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index.Tensor_hacked_twin(Tensor self, Tensor[] indices) -> Tensor" }, { - "name": "aten::log1p_(Tensor(a!) 
self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index.Tensor_out(Tensor self, Tensor?[] indices, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::log2(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index.list_Tensor(Tensor[] self, Tensor el) -> int" }, { - "name": "aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index.list_bool(bool[] self, bool el) -> int" }, { - "name": "aten::log2_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index.list_float(float[] self, float el) -> int" }, { - "name": "aten::log_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index.list_int(int[] self, int el) -> int" }, { - "name": "aten::log_normal_(Tensor(a!) self, float mean=1, float std=2, *, Generator? generator=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mean", "type": "float32", "default": 1 }, - { "name": "std", "type": "float32", "default": 2 }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index.list_str(str[] self, str el) -> int" }, { - "name": "aten::log_sigmoid(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index.str(str self, str substr, int start=0, int end=-1) -> int" }, { - "name": "aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_add(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor" }, { - "name": "aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "buffer", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_add.dimname(Tensor self, str dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor" }, { - "name": "aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "buffer", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_add.out(Tensor self, int dim, Tensor index, Tensor source, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "name": "output", "type": "Tensor" }, - { "name": "buffer", "type": "Tensor" } - ] + "name": "aten::index_add_(Tensor(a!) self, int dim, Tensor index, Tensor source, *, Scalar alpha=1) -> Tensor(a!)" }, { - "name": "aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) 
buffer) -> (Tensor(a!), Tensor(b!))", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::index_copy(Tensor self, int dim, Tensor index, Tensor source) -> Tensor" }, { - "name": "aten::log_softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_copy.dimname(Tensor self, str dim, Tensor index, Tensor source) -> Tensor" }, { - "name": "aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "dtype", "type": "ScalarType?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_copy.out(Tensor self, int dim, Tensor index, Tensor source, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "dtype", "type": "ScalarType?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_copy_(Tensor(a!) self, int dim, Tensor index, Tensor source) -> Tensor(a!)" }, { - "name": "aten::logaddexp(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_copy_.dimname(Tensor(a!) self, str dim, Tensor index, Tensor source) -> Tensor(a!)" }, { - "name": "aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_fill.Dimname_Scalar(Tensor self, str dim, Tensor index, Scalar value) -> Tensor" }, { - "name": "aten::logaddexp2(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_fill.Dimname_Tensor(Tensor self, str dim, Tensor index, Tensor value) -> Tensor" }, { - "name": "aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_fill.int_Scalar(Tensor self, int dim, Tensor index, Scalar value) -> Tensor" }, { - "name": "aten::logcumsumexp(Tensor self, int dim) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_fill.int_Scalar_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::logcumsumexp.dimname(Tensor self, Dimname dim) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_fill.int_Tensor(Tensor self, int dim, Tensor index, Tensor value) -> Tensor" }, { - "name": "aten::logcumsumexp.dimname_out(Tensor self, Dimname dim, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_fill.int_Tensor_out(Tensor self, int dim, Tensor index, Tensor value, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_fill_.Dimname_Scalar(Tensor(a!) self, str dim, Tensor index, Scalar value) -> Tensor(a!)" }, { - "name": "aten::logdet(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_fill_.Dimname_Tensor(Tensor(a!) self, str dim, Tensor index, Tensor value) -> Tensor(a!)" }, { - "name": "aten::logical_and(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_fill_.int_Scalar(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)" }, { - "name": "aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_fill_.int_Tensor(Tensor(a!) self, int dim, Tensor index, Tensor value) -> Tensor(a!)" }, { - "name": "aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor" }, { - "name": "aten::logical_not(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_put.hacked_twin(Tensor self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor" }, { - "name": "aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_put.out(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::logical_not_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_put_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)" }, { - "name": "aten::logical_or(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_put_.hacked_twin(Tensor(a!) 
self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor(a!)" }, { - "name": "aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_reduce(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor" }, { - "name": "aten::logical_or_(Tensor(a!) self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_reduce.out(Tensor self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::logical_xor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_reduce_(Tensor(a!) self, int dim, Tensor index, Tensor source, str reduce, *, bool include_self=True) -> Tensor(a!)" }, { - "name": "aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_select(Tensor self, int dim, Tensor index) -> Tensor" }, { - "name": "aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_select.dimname(Tensor self, str dim, Tensor index) -> Tensor" }, { - "name": "aten::logit(Tensor self, float? eps=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "eps", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_select.dimname_out(Tensor self, str dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "eps", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_select.out(Tensor self, int dim, Tensor index, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "eps", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::index_select_backward(Tensor grad, SymInt[] self_sizes, int dim, Tensor index) -> Tensor" }, { - "name": "aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "eps", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::instance_norm(Tensor input, Tensor? weight, Tensor? bias, Tensor? running_mean, Tensor? running_var, bool use_input_stats, float momentum, float eps, bool cudnn_enabled) -> Tensor", + "category": "Normalization" }, { - "name": "aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) 
grad_input) -> Tensor(a!)", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "eps", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::int_repr(Tensor self) -> Tensor" }, { - "name": "aten::logspace(Scalar start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "start", "type": "Scalar" }, - { "name": "end", "type": "Scalar" }, - { "name": "steps", "type": "int64" }, - { "name": "base", "type": "float32", "default": 10.0 }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "start", "type": "Scalar" }, - { "name": "end", "type": "Tensor" }, - { "name": "steps", "type": "int64" }, - { "name": "base", "type": "float32", "default": 10.0 }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::inverse(Tensor self) -> Tensor" }, { - "name": "aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "start", "type": "Scalar" }, - { "name": "end", "type": "Tensor" }, - { "name": "steps", "type": "int64" }, - { "name": "base", "type": "float32", "default": 10.0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "start", "type": "Tensor" }, - { "name": "end", "type": "Scalar" }, - { "name": "steps", "type": "int64" }, - { "name": "base", "type": "float32", "default": 10.0 }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::is_contiguous(Tensor self) -> bool" }, { - "name": "aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10.0, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "start", "type": "Tensor" }, - { "name": "end", "type": "Scalar" }, - { "name": "steps", "type": "int64" }, - { "name": "base", "type": "float32", "default": 10.0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::is_contiguous.memory_format(Tensor self, MemoryFormat memory_format) -> bool" }, { - "name": "aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10.0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "start", "type": "Tensor" }, - { "name": "end", "type": "Tensor" }, - { "name": "steps", "type": "int64" }, - { "name": "base", "type": "float32", "default": 10.0 }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::is_floating_point(Tensor self) -> bool" }, { - "name": "aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "start", "type": "Tensor" }, - { "name": "end", "type": "Tensor" }, - { "name": "steps", "type": "int64" }, - { "name": "base", "type": "float32", "default": 10.0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::isfinite(Tensor self) -> Tensor" }, { - "name": "aten::logspace.out(Scalar start, Scalar end, int steps, float base=10.0, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "start", "type": "Scalar" }, - { "name": "end", "type": "Scalar" }, - { "name": "steps", "type": "int64" }, - { "name": "base", "type": "float32", "default": 10.0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::isfinite.complex(complex a) -> bool" }, { - "name": "aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::isfinite.float(float a) -> bool" }, { - "name": "aten::logsumexp.names(Tensor self, Dimname[1] dim, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::isinf(Tensor self) -> Tensor" }, { - "name": "aten::logsumexp.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::isinf.complex(complex a) -> bool" }, { - "name": "aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::isinf.float(float a) -> bool" }, { - "name": "aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "data", "type": "Tensor" }, - { "name": "batch_sizes", "type": "Tensor" }, - { "name": "hx", "type": "Tensor[]" }, - { "name": "params", "type": "Tensor[]" }, - { "name": "has_biases", "type": "boolean" }, - { "name": "num_layers", "type": "int64" }, - { "name": "dropout", "type": "float32" }, - { "name": "train", "type": "boolean" }, - { "name": "bidirectional", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" }, - { "name": "?", "type": "Tensor" }, - { "name": "?", "type": "Tensor" } - ] + "name": "aten::isinf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "hx", "type": "Tensor[]" }, - { "name": "params", "type": "Tensor[]" }, - { "name": "has_biases", "type": "boolean" }, - { "name": "num_layers", "type": "int64" }, - { "name": "dropout", "type": "float32" }, - { "name": "train", "type": "boolean" }, - { "name": "bidirectional", "type": "boolean" }, - { "name": "batch_first", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" }, - { "name": "?", "type": "Tensor" }, - { "name": "?", "type": "Tensor" } - ] + "name": "aten::isnan(Tensor self) -> Tensor" }, { - "name": "aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "hx", "type": "Tensor[]" }, - { "name": "w_ih", "type": "Tensor" }, - { "name": "w_hh", "type": "Tensor" }, - { "name": "b_ih", "type": "Tensor?", "default": null }, - { "name": "b_hh", "type": "Tensor?", "default": null } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::isnan.complex(complex a) -> bool" }, { - "name": "aten::lt(Scalar a, Scalar b) -> bool", - "inputs": [ - { "name": "a", "type": "Scalar" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::isnan.float(float a) -> bool" }, { - "name": "aten::lt.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::isnan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::istft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, bool normalized=False, bool? onesided=None, int? 
length=None, bool return_complex=False) -> Tensor" }, { - "name": "aten::lt.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::item(Tensor self) -> Scalar" }, { - "name": "aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::items.Tensor(Dict(Tensor, t) self) -> ((Tensor, t)[])" }, { - "name": "aten::lt.float(float a, float b) -> bool", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::items.bool(Dict(bool, t) self) -> ((bool, t)[])" }, { - "name": "aten::lt.float_int(float a, int b) -> bool", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::items.complex(Dict(complex, t) self) -> ((complex, t)[])" }, { - "name": "aten::lt.int(int a, int b) -> bool", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::items.float(Dict(float, t) self) -> ((float, t)[])" }, { - "name": "aten::lt.int_float(int a, float b) -> bool", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::items.int(Dict(int, t) self) -> ((int, t)[])" }, { - "name": "aten::lt.str(str a, str b) -> bool", - "inputs": [ - { "name": "a", "type": "string" }, - { "name": "b", "type": "string" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::items.str(Dict(str, t) self) -> ((str, t)[])" }, { - "name": "aten::manual_seed(int seed) -> ()", - "inputs": [ - { "name": "seed", "type": "int64" } - ], - "outputs": [] + "name": "aten::join(str self, str[] values) -> str" }, { - "name": "aten::manual_seed.generator(Generator(a!) self, int seed) -> Generator(a!)", - "inputs": [ - { "name": "self", "type": "Generator" }, - { "name": "seed", "type": "int64" } - ], - "outputs": [ - { "type": "Generator" } - ] + "name": "aten::keys.Tensor(Dict(Tensor, t) self) -> Tensor[](*)" }, { - "name": "aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mask", "type": "Tensor" }, - { "name": "value", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::keys.bool(Dict(bool, t) self) -> bool[](*)" }, { - "name": "aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mask", "type": "Tensor" }, - { "name": "value", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::keys.complex(Dict(complex, t) self) -> complex[](*)" }, { - "name": "aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mask", "type": "Tensor" }, - { "name": "value", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::keys.float(Dict(float, t) self) -> float[](*)" }, { - "name": "aten::masked_fill_.Tensor(Tensor(a!) 
self, Tensor mask, Tensor value) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mask", "type": "Tensor" }, - { "name": "value", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::keys.int(Dict(int, t) self) -> int[](*)" }, { - "name": "aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mask", "type": "Tensor" }, - { "name": "source", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::keys.str(Dict(str, t) self) -> str[](*)" }, { - "name": "aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mask", "type": "Tensor" }, - { "name": "source", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::kl_div(Tensor self, Tensor target, int reduction=1, *, bool log_target=False) -> Tensor" }, { - "name": "aten::masked_select(Tensor self, Tensor mask) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mask", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::kthvalue(Tensor self, int k, int dim=-1, bool keepdim=False) -> (Tensor values, Tensor indices)" }, { - "name": "aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mask", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::kthvalue.dimname(Tensor self, int k, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)" }, { - "name": "aten::matmul(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::kthvalue.dimname_out(Tensor self, int k, str dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)" }, { - "name": "aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::kthvalue.values(Tensor self, int k, int dim=-1, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)" }, { - "name": "aten::max(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::l1_loss(Tensor self, Tensor target, int reduction=1) -> Tensor" }, { - "name": "aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1.0000000000000001e-05, bool cudnn_enable=True) -> Tensor", + "category": "Normalization" }, { - "name": "aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) 
indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::le(Scalar a, Scalar b) -> bool" }, { - "name": "aten::max.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::le.Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::max.names_dim_max(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::max.other(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::le.Tensor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::max.unary_out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::le.float(float a, float b) -> bool" }, { - "name": "aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> Tensor", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[1]" }, - { "name": "stride", "type": "int64[1]", "default": [] }, - { "name": "padding", "type": "int64[1]", "default": 0 }, - { "name": "dilation", "type": "int64[1]", "default": 1 }, - { "name": "ceil_mode", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::le.float_int(float a, int b) -> bool" }, { - "name": "aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=0, int[1] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[1]" }, - { "name": "stride", "type": "int64[1]", "default": [] }, - { "name": "padding", "type": "int64[1]", "default": 0 }, - { "name": "dilation", "type": "int64[1]", "default": 1 }, - { "name": "ceil_mode", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::le.int(int a, int b) -> bool" }, { - "name": "aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[2]" }, - { "name": "stride", "type": "int64[2]", "default": [] }, - { "name": "padding", "type": "int64[2]", "default": 0 }, - { "name": "dilation", "type": "int64[2]", "default": 1 }, - { "name": "ceil_mode", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::le.int_float(int a, float b) -> bool" }, { - "name": "aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[2]" }, - { "name": "stride", "type": "int64[2]", "default": [] }, - { "name": "padding", "type": "int64[2]", "default": 0 }, - { "name": "dilation", "type": "int64[2]", "default": 1 }, - { "name": "ceil_mode", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::le.str(str a, str b) -> bool" }, { - "name": "aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!))", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[2]" }, - { "name": "stride", "type": "int64[2]", "default": [] }, - { "name": "padding", "type": "int64[2]", "default": 0 }, - { "name": "dilation", "type": "int64[2]", "default": 1 }, - { "name": "ceil_mode", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor", + "category": "Activation" }, { - "name": "aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[3]" }, - { "name": "stride", "type": "int64[3]", "default": [] }, - { "name": "padding", "type": "int64[3]", "default": 0 }, - { "name": "dilation", "type": "int64[3]", "default": 1 }, - { "name": "ceil_mode", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[3]" }, - { "name": "stride", "type": "int64[3]", "default": [] }, - { "name": "padding", "type": "int64[3]", "default": 0 }, - { "name": "dilation", "type": "int64[3]", "default": 1 }, - { "name": "ceil_mode", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)", + "category": "Activation" }, { - "name": "aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[3]" }, - { "name": "stride", "type": "int64[3]", "default": [] }, - { "name": "padding", "type": "int64[3]", "default": 0 }, - { "name": "dilation", "type": "int64[3]", "default": 1 }, - { "name": "ceil_mode", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::len.Dict_Tensor(Dict(Tensor, t) self) -> int" }, { - "name": "aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[3]" }, - { "name": "stride", "type": "int64[3]" }, - { "name": "padding", "type": "int64[3]" }, - { "name": "dilation", "type": "int64[3]" }, - { "name": "ceil_mode", "type": "boolean" }, - { "name": "indices", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::len.Dict_bool(Dict(bool, t) self) -> int" }, { - "name": "aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) 
grad_input) -> Tensor(a!)", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "kernel_size", "type": "int64[3]" }, - { "name": "stride", "type": "int64[3]" }, - { "name": "padding", "type": "int64[3]" }, - { "name": "dilation", "type": "int64[3]" }, - { "name": "ceil_mode", "type": "boolean" }, - { "name": "indices", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::len.Dict_complex(Dict(complex, t) self) -> int" }, { - "name": "aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[2]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::len.Dict_float(Dict(float, t) self) -> int" }, { - "name": "aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[2]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::len.Dict_int(Dict(int, t) self) -> int" }, { - "name": "aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor", - "category": "Pool", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[3]" }, - { "name": "stride", "type": "int64[3]" }, - { "name": "padding", "type": "int64[3]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::len.Dict_str(Dict(str, t) self) -> int" }, { - "name": "aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[3]" }, - { "name": "stride", "type": "int64[3]" }, - { "name": "padding", "type": "int64[3]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::len.Tensor(Tensor t) -> int" }, { - "name": "aten::maximum(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::len.any(Any[] a) -> int" }, { - "name": "aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::len.str(str s) -> int" }, { - "name": "aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::len.t(t[] a) -> int" }, { - "name": "aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?" 
}, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor" }, { - "name": "aten::mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::mean.names_dim(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor" }, { - "name": "aten::mean.names_out(Tensor self, Dimname[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?" }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lift_fresh(Tensor(a) self) -> Tensor(a)" }, { - "name": "aten::median(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lift_fresh_copy(Tensor self) -> Tensor" }, { - "name": "aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::lift_fresh_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::linalg_cross(Tensor self, Tensor other, *, int dim=-1) -> Tensor" }, { - "name": "aten::median.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::linalg_cross.out(Tensor self, Tensor other, *, int dim=-1, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::median.names_dim_values(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::linalg_inv(Tensor A) -> Tensor" }, { - "name": "aten::meshgrid(Tensor[] tensors) -> Tensor[]", - "category": "Tensor", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::linalg_inv.out(Tensor A, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]", - "category": "Tensor", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" }, - { "name": "indexing", "type": "string", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::linalg_inv_ex(Tensor A, *, bool check_errors=False) -> (Tensor inverse, Tensor info)" }, { - "name": "aten::min(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_inv_ex.inverse(Tensor A, *, bool check_errors=False, Tensor(a!) inverse, Tensor(b!) info) -> (Tensor(a!) inverse, Tensor(b!) info)" }, { - "name": "aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64", "default": 0 }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::linalg_norm(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor" }, { - "name": "aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::linalg_norm.ord_str(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor" }, { - "name": "aten::min.names_dim(Tensor self, Dimname dim, bool keepdim=False) -> (Tensor values, Tensor indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::linalg_norm.ord_str_out(Tensor self, str ord, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::min.names_dim_min(Tensor self, Dimname dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::linalg_norm.out(Tensor self, Scalar? ord=None, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::min.other(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_qr(Tensor A, str mode=\"reduced\") -> (Tensor Q, Tensor R)" }, { - "name": "aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_qr.out(Tensor A, str mode=\"reduced\", *, Tensor(a!) Q, Tensor(b!) R) -> (Tensor(a!) Q, Tensor(b!) R)" }, { - "name": "aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_solve(Tensor A, Tensor B, *, bool left=True) -> Tensor" }, { - "name": "aten::minimum(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_solve.out(Tensor A, Tensor B, *, bool left=True, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_solve_ex(Tensor A, Tensor B, *, bool left=True, bool check_errors=False) -> (Tensor result, Tensor info)" }, { - "name": "aten::mish(Tensor self) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_solve_ex.out(Tensor A, Tensor B, *, bool left=True, bool check_errors=False, Tensor(a!) result, Tensor(b!) info) -> (Tensor(a!) result, Tensor(b!) info)" }, { - "name": "aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_solve_triangular(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False) -> Tensor" }, { - "name": "aten::mish_(Tensor(a!) 
self) -> Tensor(a!)", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_solve_triangular.out(Tensor self, Tensor B, *, bool upper, bool left=True, bool unitriangular=False, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=0, SymInt[2] stride=1, SymInt[2] dilation=1, SymInt groups=1, SymInt[]? input_size=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "padding", "type": "SymInt[2]", "default": 0 }, - { "name": "stride", "type": "SymInt[2]", "default": 1 }, - { "name": "dilation", "type": "SymInt[2]", "default": 1 }, - { "name": "groups", "type": "SymInt", "default": 1 }, - { "name": "input_size", "type": "SymInt[]?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_svd(Tensor A, bool full_matrices=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)" }, { - "name": "aten::mm(Tensor self, Tensor mat2) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mat2", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_svd.U(Tensor A, bool full_matrices=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)" }, { - "name": "aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mat2", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_tensorinv(Tensor self, int ind=2) -> Tensor" }, { - "name": "aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "source", "type": "int64" }, - { "name": "destination", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_tensorinv.out(Tensor self, int ind=2, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "source", "type": "int64[]" }, - { "name": "destination", "type": "int64[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_tensorsolve(Tensor self, Tensor other, int[]? dims=None) -> Tensor" }, { - "name": "aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "source", "type": "int64" }, - { "name": "destination", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_tensorsolve.out(Tensor self, Tensor other, int[]? dims=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "source", "type": "int64[]" }, - { "name": "destination", "type": "int64[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_vector_norm(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor" }, { - "name": "aten::mse_loss(Tensor self, Tensor target, int reduction=Mean) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "reduction", "type": "int64", "default": "Mean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linalg_vector_norm.out(Tensor self, Scalar ord=2, int[1]? dim=None, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::mse_loss.out(Tensor self, Tensor target, int reduction=Mean, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "reduction", "type": "int64", "default": "Mean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linear(Tensor input, Tensor weight, Tensor? bias=None) -> Tensor", + "category": "Layer" }, { - "name": "aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "reduction", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linear.out(Tensor input, Tensor weight, Tensor? bias=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "reduction", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linear_backward(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask) -> (Tensor, Tensor, Tensor)" }, { - "name": "aten::mul(Scalar a, Scalar b) -> Scalar", - "inputs": [ - { "name": "a", "type": "Scalar" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "type": "Scalar" } - ] + "name": "aten::linear_backward.out(Tensor self, Tensor grad_output, Tensor weight, bool[3] output_mask, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))" }, { - "name": "aten::mul.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linspace(Scalar start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linspace.Scalar_Tensor(Scalar start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::mul.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::linspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::mul.complex(complex a, complex b) -> complex", - "inputs": [ - { "name": "a", "type": "complex" }, - { "name": "b", "type": "complex" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::linspace.Tensor_Scalar(Tensor start, Scalar end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::mul.complex_float(complex a, float b) -> complex", - "inputs": [ - { "name": "a", "type": "complex" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::linspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::mul.complex_int(complex a, int b) -> complex", - "inputs": [ - { "name": "a", "type": "complex" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::linspace.Tensor_Tensor(Tensor start, Tensor end, int steps, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::mul.float(float a, float b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::linspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::mul.float_complex(float a, complex b) -> complex", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "complex" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::linspace.out(Scalar start, Scalar end, int steps, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::mul.float_int(float a, int b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::list(str t) -> str[]" }, { - "name": "aten::mul.int(int a, int b) -> int", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::list.t(t[] l) -> t[]" }, { - "name": "aten::mul.int_complex(int a, complex b) -> complex", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "complex" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::log(Tensor self) -> Tensor" }, { - "name": "aten::mul.int_float(int a, float b) -> float", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::log.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::mul.left_t(t[] l, int n) -> t[]", - "inputs": [ - { "name": "l", "type": "t[]" }, - { "name": "n", "type": "int64" } - ], - "outputs": [ - { "type": "t[]" } - ] + "name": "aten::log.Scalar_Scalar(Scalar a, Scalar b) -> float" }, { - "name": "aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log.complex(complex a) -> complex" }, { - "name": "aten::mul.right_(int n, t[] l) -> t[]", - "inputs": [ - { "name": "n", "type": "int64" }, - { "name": "l", "type": "t[]" } - ], - "outputs": [ - { "type": "t[]" } - ] + "name": "aten::log.complex_complex(complex a, complex b) -> complex" }, { - "name": "aten::mul_.Scalar(Tensor(a!) 
self, Scalar other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log.complex_float(complex a, float b) -> complex" }, { - "name": "aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log.complex_int(complex a, int b) -> complex" }, { - "name": "aten::mul_.t(t[](a!) l, int n) -> t[](a!)", - "inputs": [ - { "name": "l", "type": "t[]" }, - { "name": "n", "type": "int64" } - ], - "outputs": [ - { "type": "t[]" } - ] + "name": "aten::log.float(float a) -> float" }, { - "name": "aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "num_samples", "type": "int64" }, - { "name": "replacement", "type": "boolean", "default": false }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log.float_complex(float a, complex b) -> complex" }, { - "name": "aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "num_samples", "type": "int64" }, - { "name": "replacement", "type": "boolean", "default": false }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log.float_float(float a, float b) -> float" }, { - "name": "aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log.float_int(float a, int b) -> float" }, { - "name": "aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log.int(int a) -> float" }, { - "name": "aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log.int_complex(int a, complex b) -> complex" }, { - "name": "aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log.int_float(int a, float b) -> float" }, { - "name": "aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log.int_int(int a, int b) -> float" }, { - "name": "aten::mv(Tensor self, Tensor vec) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "vec", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "vec", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log10(Tensor self) -> Tensor" }, { - "name": "aten::mvlgamma(Tensor self, int p) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log10.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log10.complex(complex a) -> complex" }, { - "name": "aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log10.float(float a) -> float" }, { - "name": "aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "nan", "type": "float32?", "default": null }, - { "name": "posinf", "type": "float32?", "default": null }, - { "name": "neginf", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log10.int(int a) -> float" }, { - "name": "aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "nan", "type": "float32?", "default": null }, - { "name": "posinf", "type": "float32?", "default": null }, - { "name": "neginf", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "nan", "type": "float32?", "default": null }, - { "name": "posinf", "type": "float32?", "default": null }, - { "name": "neginf", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log10_(Tensor(a!) 
self) -> Tensor(a!)" }, { - "name": "aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "start", "type": "SymInt" }, - { "name": "length", "type": "SymInt" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log1p(Tensor self) -> Tensor" }, { - "name": "aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "start", "type": "Tensor" }, - { "name": "length", "type": "SymInt" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log1p.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "start", "type": "SymInt" }, - { "name": "length", "type": "SymInt" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log1p.float(float a) -> float" }, { - "name": "aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "start", "type": "SymInt" }, - { "name": "length", "type": "SymInt" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log1p.int(int a) -> float" }, { - "name": "aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)", - "category": "Normalization", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "normalized_shape", "type": "SymInt[]" }, - { "name": "weight", "type": "Tensor?" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "eps", "type": "float32" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::log1p.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::ne(Scalar a, Scalar b) -> bool", - "inputs": [ - { "name": "a", "type": "Scalar" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::log1p_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::ne.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log2(Tensor self) -> Tensor" }, { - "name": "aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log2.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::ne.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log2_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::ne.Tensor_list(Tensor[] a, Tensor[] b) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor[]" }, - { "name": "b", "type": "Tensor[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::log_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::log_normal_(Tensor(a!) self, float mean=1., float std=2., *, Generator? generator=None) -> Tensor(a!)" }, { - "name": "aten::ne.bool(bool a, bool b) -> bool", - "inputs": [ - { "name": "a", "type": "boolean" }, - { "name": "b", "type": "boolean" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::log_sigmoid(Tensor self) -> Tensor" }, { - "name": "aten::ne.bool_list(bool[] a, bool[] b) -> bool", - "inputs": [ - { "name": "a", "type": "boolean[]" }, - { "name": "b", "type": "boolean[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::log_sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::ne.complex(complex a, complex b) -> bool", - "inputs": [ - { "name": "a", "type": "complex" }, - { "name": "b", "type": "complex" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::log_sigmoid_backward(Tensor grad_output, Tensor self, Tensor buffer) -> Tensor" }, { - "name": "aten::ne.complex_float(complex a, float b) -> bool", - "inputs": [ - { "name": "a", "type": "complex" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::log_sigmoid_backward.grad_input(Tensor grad_output, Tensor self, Tensor buffer, *, Tensor(a!) grad_input) -> Tensor(a!)" }, { - "name": "aten::ne.device(Device a, Device b) -> bool", - "inputs": [ - { "name": "a", "type": "Device" }, - { "name": "b", "type": "Device" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)" }, { - "name": "aten::ne.enum(AnyEnumType a, AnyEnumType b) -> bool", - "inputs": [ - { "name": "a", "type": "AnyEnumType" }, - { "name": "b", "type": "AnyEnumType" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))" }, { - "name": "aten::ne.float(float a, float b) -> bool", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::log_softmax.Dimname(Tensor self, str dim, *, ScalarType? dtype=None) -> Tensor", + "category": "Activation" }, { - "name": "aten::ne.float_complex(float a, complex b) -> bool", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "complex" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::log_softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", + "category": "Activation" }, { - "name": "aten::ne.float_int(float a, int b) -> bool", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::log_softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::ne.float_list(float[] a, float[] b) -> bool", - "inputs": [ - { "name": "a", "type": "float32[]" }, - { "name": "b", "type": "float32[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::logaddexp(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::ne.int(int a, int b) -> bool", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::logaddexp.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::ne.int_float(int a, float b) -> bool", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::logaddexp2(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::ne.int_list(int[] a, int[] b) -> bool", - "inputs": [ - { "name": "a", "type": "int64[]" }, - { "name": "b", "type": "int64[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::ne.str(str a, str b) -> bool", - "inputs": [ - { "name": "a", "type": "string" }, - { "name": "b", "type": "string" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::logcumsumexp(Tensor self, int dim) -> Tensor" }, { - "name": "aten::ne.str_list(str[] a, str[] b) -> bool", - "inputs": [ - { "name": "a", "type": "string[]" }, - { "name": "b", "type": "string[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "aten::logcumsumexp.dimname(Tensor self, str dim) -> Tensor" }, { - "name": "aten::neg(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logcumsumexp.dimname_out(Tensor self, str dim, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::neg.Scalar(Scalar a) -> Scalar", - "inputs": [ - { "name": "a", "type": "Scalar" } - ], - "outputs": [ - { "type": "Scalar" } - ] + "name": "aten::logcumsumexp.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::neg.complex(complex a) -> complex", - "inputs": [ - { "name": "a", "type": "complex" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::logdet(Tensor self) -> Tensor" }, { - "name": "aten::neg.float(float a) -> float", - "inputs": [ - { "name": "a", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::logical_and(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::neg.int(int a) -> int", - "inputs": [ - { "name": "a", "type": "int64" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::logical_and.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logical_and_(Tensor(a!) self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "padding", "type": "float32" }, - { "name": "output_size", "type": "int64[]?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logical_not(Tensor self) -> Tensor" }, { - "name": "aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? 
device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logical_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "stride", "type": "SymInt[]" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logical_not_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "fill_value", "type": "Scalar" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logical_or(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logical_or.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logical_or_(Tensor(a!) 
self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::nll_loss_nd(Tensor self, Tensor target, Tensor? weight=None, int reduction=Mean, SymInt ignore_index=-100) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?", "default": null }, - { "name": "reduction", "type": "int64", "default": "Mean" }, - { "name": "ignore_index", "type": "SymInt", "default": -100 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logical_xor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::nonzero(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logical_xor.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logical_xor_(Tensor(a!) self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::nonzero_numpy(Tensor self) -> Tensor[]", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::logit(Tensor self, float? eps=None) -> Tensor" }, { - "name": "aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "Scalar", "default": 2 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logit.out(Tensor self, float? eps=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "Scalar?" }, - { "name": "dim", "type": "int64[1]" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logit_(Tensor(a!) self, float? eps=None) -> Tensor(a!)" }, { - "name": "aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "Scalar?" }, - { "name": "dim", "type": "int64[1]" }, - { "name": "keepdim", "type": "boolean" }, - { "name": "dtype", "type": "ScalarType", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor" }, { - "name": "aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "Scalar?" }, - { "name": "dtype", "type": "ScalarType", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)" }, { - "name": "aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "Scalar?" }, - { "name": "dim", "type": "int64[1]" }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logspace(Scalar start, Scalar end, int steps, float base=10., *, ScalarType? dtype=None, Layout? 
layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "Scalar?" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logspace.Scalar_Tensor(Scalar start, Tensor end, int steps, float base=10., *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "Scalar?" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "keepdim", "type": "boolean" }, - { "name": "dtype", "type": "ScalarType", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logspace.Scalar_Tensor_out(Scalar start, Tensor end, int steps, float base=10., *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::norm.names_dtype_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "Scalar?" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "keepdim", "type": "boolean" }, - { "name": "dtype", "type": "ScalarType", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logspace.Tensor_Scalar(Tensor start, Scalar end, int steps, float base=10., *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::norm.names_out(Tensor self, Scalar? p, Dimname[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "Scalar?" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logspace.Tensor_Scalar_out(Tensor start, Scalar end, int steps, float base=10., *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "Scalar?" }, - { "name": "dim", "type": "int64[1]" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logspace.Tensor_Tensor(Tensor start, Tensor end, int steps, float base=10., *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor", - "inputs": [ - { "name": "mean", "type": "Tensor" }, - { "name": "std", "type": "Tensor" }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logspace.Tensor_Tensor_out(Tensor start, Tensor end, int steps, float base=10., *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "mean", "type": "Tensor" }, - { "name": "std", "type": "Tensor" }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logspace.out(Scalar start, Scalar end, int steps, float base=10., *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::normal.Tensor_float(Tensor mean, float std=1, *, Generator? generator=None) -> Tensor", - "inputs": [ - { "name": "mean", "type": "Tensor" }, - { "name": "std", "type": "float32", "default": 1 }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logsumexp(Tensor self, int[1] dim, bool keepdim=False) -> Tensor" }, { - "name": "aten::normal.Tensor_float_out(Tensor mean, float std=1, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "mean", "type": "Tensor" }, - { "name": "std", "type": "float32", "default": 1 }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logsumexp.names(Tensor self, str[1] dim, bool keepdim=False) -> Tensor" }, { - "name": "aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor", - "inputs": [ - { "name": "mean", "type": "float32" }, - { "name": "std", "type": "Tensor" }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logsumexp.names_out(Tensor self, str[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "mean", "type": "float32" }, - { "name": "std", "type": "Tensor" }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::logsumexp.out(Tensor self, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "mean", "type": "float32", "default": 0 }, - { "name": "std", "type": "float32" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor, Tensor)", + "category": "Layer" }, { - "name": "aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "mean", "type": "float32" }, - { "name": "std", "type": "float32" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lstm.input(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor)", + "category": "Layer" }, { - "name": "aten::normal_(Tensor(a!) self, float mean=0, float std=1, *, Generator? generator=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "mean", "type": "float32", "default": 0 }, - { "name": "std", "type": "float32", "default": 1 }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> (Tensor, Tensor)", + "category": "Layer" }, { - "name": "aten::numel(Tensor self) -> int", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::lt(Scalar a, Scalar b) -> bool" }, { - "name": "aten::numpy_T(Tensor(a) self) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lt.Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::numpy_T.a(Tensor(a) self) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::one_hot(Tensor self, int num_classes=-1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "num_classes", "type": "int64", "default": -1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lt.Tensor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "size", "type": "SymInt[]" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::ones.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "size", "type": "int64[]" }, - { "name": "names", "type": "Dimname[]?", "kwarg_only": true }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lt.float(float a, float b) -> bool" }, { - "name": "aten::ones.out(SymInt[] size, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "size", "type": "SymInt[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lt.float_int(float a, int b) -> bool" }, { - "name": "aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lt.int(int a, int b) -> bool" }, { - "name": "aten::outer(Tensor self, Tensor vec2) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "vec2", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lt.int_float(int a, float b) -> bool" }, { - "name": "aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "vec2", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::lt.str(str a, str b) -> bool" }, { - "name": "aten::pad(Tensor self, SymInt[] pad, str mode=\"constant\", float? value=None) -> Tensor", - "category": "Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "pad", "type": "SymInt[]" }, - { "name": "mode", "type": "string", "default": "constant" }, - { "name": "value", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::manual_seed(int seed) -> ()" }, { - "name": "aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0.0, str padding_side=\"right\") -> Tensor", - "inputs": [ - { "name": "sequences", "type": "Tensor[]" }, - { "name": "batch_first", "type": "boolean", "default": false }, - { "name": "padding_value", "type": "float32", "default": 0.0 }, - { "name": "padding_side", "type": "string", "default": "right" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::manual_seed.generator(Generator(a!) self, int seed) -> Generator(a!)" }, { - "name": "aten::pairwise_distance(Tensor x1, Tensor x2, float p=2, float eps=1e-06, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "x1", "type": "Tensor" }, - { "name": "x2", "type": "Tensor" }, - { "name": "p", "type": "float32", "default": 2 }, - { "name": "eps", "type": "float32", "default": 1e-06 }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::masked_fill.Scalar(Tensor self, Tensor mask, Scalar value) -> Tensor" }, { - "name": "aten::pdist(Tensor self, float p=2) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "float32", "default": 2 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::masked_fill.Scalar_out(Tensor self, Tensor mask, Scalar value, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)", - "category": "Shape", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dims", "type": "int64[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::masked_fill.Tensor(Tensor self, Tensor mask, Tensor value) -> Tensor" + }, + { + "name": "aten::masked_fill.Tensor_out(Tensor self, Tensor mask, Tensor value, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::masked_fill_.Scalar(Tensor(a!) self, Tensor mask, Scalar value) -> Tensor(a!)" + }, + { + "name": "aten::masked_fill_.Tensor(Tensor(a!) self, Tensor mask, Tensor value) -> Tensor(a!)" + }, + { + "name": "aten::masked_scatter(Tensor self, Tensor mask, Tensor source) -> Tensor" + }, + { + "name": "aten::masked_scatter.out(Tensor self, Tensor mask, Tensor source, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::masked_scatter_(Tensor(a!) self, Tensor mask, Tensor source) -> Tensor(a!)" + }, + { + "name": "aten::masked_select(Tensor self, Tensor mask) -> Tensor" + }, + { + "name": "aten::masked_select.out(Tensor self, Tensor mask, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::matmul(Tensor self, Tensor other) -> Tensor" + }, + { + "name": "aten::matmul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::max(Tensor self) -> Tensor" + }, + { + "name": "aten::max.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)" + }, + { + "name": "aten::max.dim_max(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)" + }, + { + "name": "aten::max.names_dim(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)" + }, + { + "name": "aten::max.names_dim_max(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) max, Tensor(b!) max_values) -> (Tensor(a!) values, Tensor(b!) indices)" + }, + { + "name": "aten::max.other(Tensor self, Tensor other) -> Tensor" + }, + { + "name": "aten::max.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::max.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::max_pool1d(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], int[1] dilation=[1], bool ceil_mode=False) -> Tensor", + "category": "Pool" + }, + { + "name": "aten::max_pool1d_with_indices(Tensor self, int[1] kernel_size, int[1] stride=[], int[1] padding=[0], int[1] dilation=[1], bool ceil_mode=False) -> (Tensor, Tensor)", + "category": "Pool" + }, + { + "name": "aten::max_pool2d(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> Tensor", + "category": "Pool" + }, + { + "name": "aten::max_pool2d_with_indices(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)", + "category": "Pool" + }, + { + "name": "aten::max_pool2d_with_indices.out(Tensor self, int[2] kernel_size, int[2] stride=[], int[2] padding=0, int[2] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) 
indices) -> (Tensor(a!), Tensor(b!))" + }, + { + "name": "aten::max_pool3d(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> Tensor", + "category": "Pool" + }, + { + "name": "aten::max_pool3d_with_indices(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False) -> (Tensor, Tensor)" + }, + { + "name": "aten::max_pool3d_with_indices.out(Tensor self, int[3] kernel_size, int[3] stride=[], int[3] padding=0, int[3] dilation=1, bool ceil_mode=False, *, Tensor(a!) out, Tensor(b!) indices) -> (Tensor(a!), Tensor(b!))" + }, + { + "name": "aten::max_pool3d_with_indices_backward(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices) -> Tensor" + }, + { + "name": "aten::max_pool3d_with_indices_backward.grad_input(Tensor grad_output, Tensor self, int[3] kernel_size, int[3] stride, int[3] padding, int[3] dilation, bool ceil_mode, Tensor indices, *, Tensor(a!) grad_input) -> Tensor(a!)" + }, + { + "name": "aten::max_unpool2d(Tensor self, Tensor indices, SymInt[2] output_size) -> Tensor", + "category": "Pool" + }, + { + "name": "aten::max_unpool2d.out(Tensor self, Tensor indices, SymInt[2] output_size, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor", + "category": "Pool" + }, + { + "name": "aten::max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::maximum(Tensor self, Tensor other) -> Tensor" + }, + { + "name": "aten::maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::mean(Tensor self, *, ScalarType? dtype=None) -> Tensor" + }, + { + "name": "aten::mean.dim(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor" + }, + { + "name": "aten::mean.dtype_out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::mean.names_dim(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor" + }, + { + "name": "aten::mean.names_out(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::mean.out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::median(Tensor self) -> Tensor" + }, + { + "name": "aten::median.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)" + }, + { + "name": "aten::median.dim_values(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)" + }, + { + "name": "aten::median.names_dim(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)" + }, + { + "name": "aten::median.names_dim_values(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)" + }, + { + "name": "aten::median.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)" + }, + { + "name": "aten::meshgrid(Tensor[] tensors) -> Tensor[]", + "category": "Tensor" + }, + { + "name": "aten::meshgrid.indexing(Tensor[] tensors, *, str indexing) -> Tensor[]", + "category": "Tensor" + }, + { + "name": "aten::min(Tensor self) -> Tensor" + }, + { + "name": "aten::min.dim(Tensor self, int dim, bool keepdim=False) -> (Tensor values, Tensor indices)" + }, + { + "name": "aten::min.dim_min(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)" + }, + { + "name": "aten::min.names_dim(Tensor self, str dim, bool keepdim=False) -> (Tensor values, Tensor indices)" + }, + { + "name": "aten::min.names_dim_min(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) min, Tensor(b!) min_indices) -> (Tensor(a!) values, Tensor(b!) indices)" + }, + { + "name": "aten::min.other(Tensor self, Tensor other) -> Tensor" + }, + { + "name": "aten::min.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::min.unary_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::minimum(Tensor self, Tensor other) -> Tensor" + }, + { + "name": "aten::minimum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::mish(Tensor self) -> Tensor", + "category": "Activation" + }, + { + "name": "aten::mish.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::mish_(Tensor(a!) self) -> Tensor(a!)", + "category": "Activation" + }, + { + "name": "aten::mkldnn_reorder_conv2d_weight(Tensor self, SymInt[2] padding=[0, 0], SymInt[2] stride=[1, 1], SymInt[2] dilation=[1, 1], SymInt groups=1, SymInt[]? input_size=None) -> Tensor" + }, + { + "name": "aten::mkldnn_reorder_conv2d_weight.out(Tensor self, SymInt[2] padding=[0, 0], SymInt[2] stride=[1, 1], SymInt[2] dilation=[1, 1], SymInt groups=1, SymInt[]? input_size=None, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::mm(Tensor self, Tensor mat2) -> Tensor" + }, + { + "name": "aten::mm.out(Tensor self, Tensor mat2, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::moveaxis.int(Tensor(a) self, int source, int destination) -> Tensor(a)" + }, + { + "name": "aten::moveaxis.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)" + }, + { + "name": "aten::movedim.int(Tensor(a) self, int source, int destination) -> Tensor(a)" + }, + { + "name": "aten::movedim.intlist(Tensor(a) self, int[] source, int[] destination) -> Tensor(a)" + }, + { + "name": "aten::mse_loss(Tensor self, Tensor target, int reduction=1) -> Tensor" + }, + { + "name": "aten::mse_loss.out(Tensor self, Tensor target, int reduction=1, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor" + }, + { + "name": "aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!)" + }, + { + "name": "aten::mul(Scalar a, Scalar b) -> Scalar" + }, + { + "name": "aten::mul.Scalar(Tensor self, Scalar other) -> Tensor" + }, + { + "name": "aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) 
out) -> Tensor(a!)" + }, + { + "name": "aten::mul.Tensor(Tensor self, Tensor other) -> Tensor" + }, + { + "name": "aten::mul.complex(complex a, complex b) -> complex" + }, + { + "name": "aten::mul.complex_float(complex a, float b) -> complex" + }, + { + "name": "aten::mul.complex_int(complex a, int b) -> complex" + }, + { + "name": "aten::mul.float(float a, float b) -> float" + }, + { + "name": "aten::mul.float_complex(float a, complex b) -> complex" + }, + { + "name": "aten::mul.float_int(float a, int b) -> float" + }, + { + "name": "aten::mul.int(int a, int b) -> int" + }, + { + "name": "aten::mul.int_complex(int a, complex b) -> complex" + }, + { + "name": "aten::mul.int_float(int a, float b) -> float" + }, + { + "name": "aten::mul.left_t(t[] l, int n) -> t[]" + }, + { + "name": "aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::mul.right_(int n, t[] l) -> t[]" + }, + { + "name": "aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)" + }, + { + "name": "aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)" + }, + { + "name": "aten::mul_.t(t[](a!) l, int n) -> t[](a!)" + }, + { + "name": "aten::multinomial(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None) -> Tensor" + }, + { + "name": "aten::multinomial.out(Tensor self, int num_samples, bool replacement=False, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::multiply.Scalar(Tensor self, Scalar other) -> Tensor" + }, + { + "name": "aten::multiply.Tensor(Tensor self, Tensor other) -> Tensor" + }, + { + "name": "aten::multiply.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::multiply_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)" + }, + { + "name": "aten::multiply_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)" + }, + { + "name": "aten::mv(Tensor self, Tensor vec) -> Tensor" + }, + { + "name": "aten::mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::mvlgamma(Tensor self, int p) -> Tensor" + }, + { + "name": "aten::mvlgamma.out(Tensor self, int p, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::mvlgamma_(Tensor(a!) self, int p) -> Tensor(a!)" + }, + { + "name": "aten::nan_to_num(Tensor self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor" + }, + { + "name": "aten::nan_to_num.out(Tensor self, float? nan=None, float? posinf=None, float? neginf=None, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::nan_to_num_(Tensor(a!) self, float? nan=None, float? posinf=None, float? neginf=None) -> Tensor(a!)" + }, + { + "name": "aten::narrow(Tensor(a) self, int dim, SymInt start, SymInt length) -> Tensor(a)" + }, + { + "name": "aten::narrow.Tensor(Tensor(a) self, int dim, Tensor start, SymInt length) -> Tensor(a)" + }, + { + "name": "aten::narrow_copy(Tensor self, int dim, SymInt start, SymInt length) -> Tensor" + }, + { + "name": "aten::narrow_copy.out(Tensor self, int dim, SymInt start, SymInt length, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::native_layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps) -> (Tensor, Tensor, Tensor)", + "category": "Normalization" + }, + { + "name": "aten::native_layer_norm.out(Tensor input, SymInt[] normalized_shape, Tensor? weight, Tensor? bias, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) 
out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))" + }, + { + "name": "aten::ne(Scalar a, Scalar b) -> bool" + }, + { + "name": "aten::ne.Scalar(Tensor self, Scalar other) -> Tensor" + }, + { + "name": "aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::ne.Tensor(Tensor self, Tensor other) -> Tensor" + }, + { + "name": "aten::ne.Tensor_list(Tensor[] a, Tensor[] b) -> bool" + }, + { + "name": "aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::ne.bool(bool a, bool b) -> bool" + }, + { + "name": "aten::ne.bool_list(bool[] a, bool[] b) -> bool" + }, + { + "name": "aten::ne.complex(complex a, complex b) -> bool" + }, + { + "name": "aten::ne.complex_float(complex a, float b) -> bool" + }, + { + "name": "aten::ne.device(Device a, Device b) -> bool" + }, + { + "name": "aten::ne.enum(AnyEnumType a, AnyEnumType b) -> bool" + }, + { + "name": "aten::ne.float(float a, float b) -> bool" + }, + { + "name": "aten::ne.float_complex(float a, complex b) -> bool" + }, + { + "name": "aten::ne.float_int(float a, int b) -> bool" + }, + { + "name": "aten::ne.float_list(float[] a, float[] b) -> bool" + }, + { + "name": "aten::ne.int(int a, int b) -> bool" + }, + { + "name": "aten::ne.int_float(int a, float b) -> bool" + }, + { + "name": "aten::ne.int_list(int[] a, int[] b) -> bool" + }, + { + "name": "aten::ne.str(str a, str b) -> bool" + }, + { + "name": "aten::ne.str_list(str[] a, str[] b) -> bool" + }, + { + "name": "aten::neg(Tensor self) -> Tensor" + }, + { + "name": "aten::neg.Scalar(Scalar a) -> Scalar" + }, + { + "name": "aten::neg.complex(complex a) -> complex" + }, + { + "name": "aten::neg.float(float a) -> float" + }, + { + "name": "aten::neg.int(int a) -> int" + }, + { + "name": "aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::nested_to_padded_tensor(Tensor self, float padding, int[]? output_size=None) -> Tensor" + }, + { + "name": "aten::new_empty(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" + }, + { + "name": "aten::new_empty.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::new_empty_strided(Tensor self, SymInt[] size, SymInt[] stride, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" + }, + { + "name": "aten::new_empty_strided.out(Tensor self, SymInt[] size, SymInt[] stride, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::new_full(Tensor self, SymInt[] size, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" + }, + { + "name": "aten::new_full.out(Tensor self, SymInt[] size, Scalar fill_value, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::new_ones(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" + }, + { + "name": "aten::new_ones.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::new_zeros(Tensor self, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" + }, + { + "name": "aten::new_zeros.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::nll_loss_nd(Tensor self, Tensor target, Tensor? 
weight=None, int reduction=1, SymInt ignore_index=-100) -> Tensor" + }, + { + "name": "aten::nonzero(Tensor self) -> Tensor" + }, + { + "name": "aten::nonzero.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::nonzero_numpy(Tensor self) -> Tensor[]" + }, + { + "name": "aten::norm.Scalar(Tensor self, Scalar p=2) -> Tensor" + }, + { + "name": "aten::norm.ScalarOpt_dim(Tensor self, Scalar? p, int[1] dim, bool keepdim=False) -> Tensor" + }, + { + "name": "aten::norm.ScalarOpt_dim_dtype(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor" + }, + { + "name": "aten::norm.ScalarOpt_dtype(Tensor self, Scalar? p, *, ScalarType dtype) -> Tensor" + }, + { + "name": "aten::norm.ScalarOpt_dtype_out(Tensor self, Scalar? p, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::norm.Scalar_out(Tensor self, Scalar p=2, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::norm.dtype_out(Tensor self, Scalar? p, int[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::norm.names_ScalarOpt_dim(Tensor self, Scalar? p, str[1] dim, bool keepdim=False) -> Tensor" + }, + { + "name": "aten::norm.names_ScalarOpt_dim_dtype(Tensor self, Scalar? p, str[1] dim, bool keepdim, *, ScalarType dtype) -> Tensor" + }, + { + "name": "aten::norm.names_dtype_out(Tensor self, Scalar? p, str[1] dim, bool keepdim, *, ScalarType dtype, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::norm.names_out(Tensor self, Scalar? p, str[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::norm.out(Tensor self, Scalar? p, int[1] dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::normal.Tensor_Tensor(Tensor mean, Tensor std, *, Generator? generator=None) -> Tensor" + }, + { + "name": "aten::normal.Tensor_Tensor_out(Tensor mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::normal.Tensor_float(Tensor mean, float std=1., *, Generator? generator=None) -> Tensor" + }, + { + "name": "aten::normal.Tensor_float_out(Tensor mean, float std=1., *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::pin_memory(Tensor(a) self, Device? device=None) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "device", "type": "Device?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::normal.float_Tensor(float mean, Tensor std, *, Generator? generator=None) -> Tensor" }, { - "name": "aten::pinverse(Tensor self, float rcond=1e-15) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "rcond", "type": "float32", "default": 1e-15 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::normal.float_Tensor_out(float mean, Tensor std, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "upscale_factor", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::normal.float_float(float mean, float std, SymInt[] size, *, Generator? generator=None, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor" }, { - "name": "aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "downscale_factor", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::normal.float_float_out(float mean, float std, SymInt[] size, *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::polar(Tensor abs, Tensor angle) -> Tensor", - "inputs": [ - { "name": "abs", "type": "Tensor" }, - { "name": "angle", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::normal.out(Tensor self, float mean=0., float std=1., *, Generator? generator=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "abs", "type": "Tensor" }, - { "name": "angle", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::normal_(Tensor(a!) self, float mean=0., float std=1., *, Generator? generator=None) -> Tensor(a!)" }, { - "name": "aten::pop.t(t[](a!) self, int idx=-1) -> t(*)", - "inputs": [ - { "name": "self", "type": "t[]" }, - { "name": "idx", "type": "int64", "default": -1 } - ], - "outputs": [ - { "type": "t" } - ] + "name": "aten::numel(Tensor self) -> int" }, { - "name": "aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor", - "inputs": [ - { "name": "self", "type": "Scalar" }, - { "name": "exponent", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::numpy_T(Tensor(a) self) -> Tensor(a)" }, { - "name": "aten::pow.Scalar_Scalar(Scalar a, Scalar b) -> float", - "inputs": [ - { "name": "a", "type": "Scalar" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::numpy_T.a(Tensor(a) self) -> Tensor(a)" }, { - "name": "aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Scalar" }, - { "name": "exponent", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::one_hot(Tensor self, int num_classes=-1) -> Tensor" }, { - "name": "aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "exponent", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::ones(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "exponent", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::ones.names(int[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "exponent", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::ones.names_out(int[] size, *, str[]? names, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "exponent", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::ones.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::pow.complex(complex a, complex b) -> complex", - "inputs": [ - { "name": "a", "type": "complex" }, - { "name": "b", "type": "complex" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor" }, { - "name": "aten::pow.complex_float(complex a, float b) -> complex", - "inputs": [ - { "name": "a", "type": "complex" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::ones_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::pow.float(float a, float b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::outer(Tensor self, Tensor vec2) -> Tensor" }, { - "name": "aten::pow.float_complex(float a, complex b) -> complex", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "complex" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::outer.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::pow.float_int(float a, int b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::pad(Tensor self, SymInt[] pad, str mode=\"constant\", float? value=None) -> Tensor", + "category": "Tensor" }, { - "name": "aten::pow.int(int a, int b) -> float", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::pad_sequence(Tensor[] sequences, bool batch_first=False, float padding_value=0., str padding_side=\"right\") -> Tensor" }, { - "name": "aten::pow.int_float(int a, float b) -> float", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::pairwise_distance(Tensor x1, Tensor x2, float p=2., float eps=9.9999999999999995e-07, bool keepdim=False) -> Tensor" }, { - "name": "aten::pow.int_to_int(int a, int b) -> int", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::pdist(Tensor self, float p=2.) -> Tensor" }, { - "name": "aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "exponent", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::permute(Tensor(a) self, int[] dims) -> Tensor(a)", + "category": "Shape" }, { - "name": "aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "exponent", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::pin_memory(Tensor(a) self, Device? 
device=None) -> Tensor(a)" + }, + { + "name": "aten::pinverse(Tensor self, float rcond=1.0000000000000001e-15) -> Tensor" + }, + { + "name": "aten::pixel_shuffle(Tensor self, int upscale_factor) -> Tensor" + }, + { + "name": "aten::pixel_shuffle.out(Tensor self, int upscale_factor, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::pixel_unshuffle(Tensor self, int downscale_factor) -> Tensor" + }, + { + "name": "aten::pixel_unshuffle.out(Tensor self, int downscale_factor, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::polar(Tensor abs, Tensor angle) -> Tensor" + }, + { + "name": "aten::polar.Scalar_Scalar(Scalar a, Scalar b) -> Scalar" + }, + { + "name": "aten::polar.float(float a, float b) -> complex" + }, + { + "name": "aten::polar.float_int(float a, int b) -> complex" + }, + { + "name": "aten::polar.int(int a, int b) -> complex" + }, + { + "name": "aten::polar.int_float(int a, float b) -> complex" + }, + { + "name": "aten::polar.out(Tensor abs, Tensor angle, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::pop.Dict_Tensor(Dict(Tensor, t)(a!) self, Tensor key) -> t(*)" + }, + { + "name": "aten::pop.Dict_bool(Dict(bool, t)(a!) self, bool key) -> t(*)" + }, + { + "name": "aten::pop.Dict_complex(Dict(complex, t)(a!) self, complex key) -> t(*)" + }, + { + "name": "aten::pop.Dict_default_Tensor(Dict(Tensor, t)(a!) self, Tensor key, t default_value) -> t(*)" + }, + { + "name": "aten::pop.Dict_default_bool(Dict(bool, t)(a!) self, bool key, t default_value) -> t(*)" + }, + { + "name": "aten::pop.Dict_default_complex(Dict(complex, t)(a!) self, complex key, t default_value) -> t(*)" + }, + { + "name": "aten::pop.Dict_default_float(Dict(float, t)(a!) self, float key, t default_value) -> t(*)" + }, + { + "name": "aten::pop.Dict_default_int(Dict(int, t)(a!) self, int key, t default_value) -> t(*)" + }, + { + "name": "aten::pop.Dict_default_str(Dict(str, t)(a!) self, str key, t default_value) -> t(*)" + }, + { + "name": "aten::pop.Dict_float(Dict(float, t)(a!) self, float key) -> t(*)" + }, + { + "name": "aten::pop.Dict_int(Dict(int, t)(a!) self, int key) -> t(*)" + }, + { + "name": "aten::pop.Dict_str(Dict(str, t)(a!) self, str key) -> t(*)" + }, + { + "name": "aten::pop.t(t[](a!) self, int idx=-1) -> t(*)" + }, + { + "name": "aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor" + }, + { + "name": "aten::pow.Scalar_Scalar(Scalar a, Scalar b) -> float" + }, + { + "name": "aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor" + }, + { + "name": "aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor" + }, + { + "name": "aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::pow.complex(complex a, complex b) -> complex" + }, + { + "name": "aten::pow.complex_float(complex a, float b) -> complex" + }, + { + "name": "aten::pow.float(float a, float b) -> float" + }, + { + "name": "aten::pow.float_complex(float a, complex b) -> complex" + }, + { + "name": "aten::pow.float_int(float a, int b) -> float" + }, + { + "name": "aten::pow.int(int a, int b) -> float" + }, + { + "name": "aten::pow.int_float(int a, float b) -> float" + }, + { + "name": "aten::pow.int_to_int(int a, int b) -> int" + }, + { + "name": "aten::pow_.Scalar(Tensor(a!) 
self, Scalar exponent) -> Tensor(a!)" + }, + { + "name": "aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)" }, { "name": "aten::prelu(Tensor self, Tensor weight) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" }, { - "name": "aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::prod(Tensor self, *, ScalarType? dtype=None) -> Tensor" }, { - "name": "aten::prod.Dimname_out(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::prod.Dimname_out(Tensor self, str dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::prod.dim_Dimname(Tensor self, Dimname dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::prod.dim_Dimname(Tensor self, str dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor" }, { - "name": "aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::prod.dim_int(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor" }, { - "name": "aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::prod.int_out(Tensor self, int dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "index", "type": "Tensor" }, - { "name": "source", "type": "Tensor" }, - { "name": "accumulate", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::prod.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::quantile(Tensor self, Tensor q, int? 
dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "q", "type": "Tensor" }, - { "name": "dim", "type": "int64?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "interpolation", "type": "string", "default": "linear", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::put_(Tensor(a!) self, Tensor index, Tensor source, bool accumulate=False) -> Tensor(a!)" }, { - "name": "aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "q", "type": "Tensor" }, - { "name": "dim", "type": "int64?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "interpolation", "type": "string", "default": "linear", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::quantile(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation=\"linear\") -> Tensor" }, { - "name": "aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear') -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "q", "type": "float32" }, - { "name": "dim", "type": "int64?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "interpolation", "type": "string", "default": "linear", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::quantile.out(Tensor self, Tensor q, int? dim=None, bool keepdim=False, *, str interpolation=\"linear\", Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation='linear', Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "q", "type": "float32" }, - { "name": "dim", "type": "int64?", "default": null }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "interpolation", "type": "string", "default": "linear", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::quantile.scalar(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation=\"linear\") -> Tensor" + }, + { + "name": "aten::quantile.scalar_out(Tensor self, float q, int? dim=None, bool keepdim=False, *, str interpolation=\"linear\", Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::quantize_per_channel(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype) -> Tensor", - "category": "Quantization", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "scales", "type": "Tensor" }, - { "name": "zero_points", "type": "Tensor" }, - { "name": "axis", "type": "int64" }, - { "name": "dtype", "type": "ScalarType" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Quantization" + }, + { + "name": "aten::quantize_per_channel.out(Tensor self, Tensor scales, Tensor zero_points, int axis, ScalarType dtype, *, Tensor(a!) 
out) -> Tensor(a!)" }, { "name": "aten::quantize_per_tensor(Tensor self, float scale, int zero_point, ScalarType dtype) -> Tensor", - "category": "Quantization", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "scale", "type": "float32" }, - { "name": "zero_point", "type": "int64" }, - { "name": "dtype", "type": "ScalarType" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Quantization" + }, + { + "name": "aten::quantize_per_tensor.out(Tensor self, float scale, int zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::quantize_per_tensor.tensor_qparams(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype) -> Tensor", - "category": "Quantization", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "scale", "type": "Tensor" }, - { "name": "zero_point", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Quantization" + }, + { + "name": "aten::quantize_per_tensor.tensor_qparams_out(Tensor self, Tensor scale, Tensor zero_point, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::quantize_per_tensor.tensors(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype) -> Tensor[]", - "category": "Quantization", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" }, - { "name": "scales", "type": "Tensor" }, - { "name": "zero_points", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType" } - ], - "outputs": [ - { "name": "outputs", "type": "Tensor[]" } - ] + "category": "Quantization" + }, + { + "name": "aten::quantize_per_tensor.tensors_out(Tensor[] tensors, Tensor scales, Tensor zero_points, ScalarType dtype, *, Tensor(a!)[] out) -> ()" }, { "name": "aten::quantize_per_tensor_dynamic(Tensor self, ScalarType dtype, bool reduce_range) -> Tensor", - "category": "Quantization", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType" }, - { "name": "reduce_range", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Quantization" + }, + { + "name": "aten::quantize_per_tensor_dynamic.out(Tensor self, ScalarType dtype, bool reduce_range, *, Tensor(a!) 
out) -> Tensor(a!)" }, { "name": "aten::quantized_gru.data(Tensor data, Tensor batch_sizes, Tensor hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "data", "type": "Tensor" }, - { "name": "batch_sizes", "type": "Tensor" }, - { "name": "hx", "type": "Tensor" }, - { "name": "params", "type": "__torch__.torch.classes.rnn.CellParamsBase[]" }, - { "name": "has_biases", "type": "boolean" }, - { "name": "num_layers", "type": "int64" }, - { "name": "dropout", "type": "float32" }, - { "name": "train", "type": "boolean" }, - { "name": "bidirectional", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "category": "Layer" }, { "name": "aten::quantized_gru.data_legacy(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "data", "type": "Tensor" }, - { "name": "batch_sizes", "type": "Tensor" }, - { "name": "hx", "type": "Tensor" }, - { "name": "params", "type": "Tensor[]" }, - { "name": "has_biases", "type": "boolean" }, - { "name": "num_layers", "type": "int64" }, - { "name": "dropout", "type": "float32" }, - { "name": "train", "type": "boolean" }, - { "name": "bidirectional", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "category": "Layer" }, { "name": "aten::quantized_gru.input(Tensor input, Tensor hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "hx", "type": "Tensor" }, - { "name": "params", "type": "__torch__.torch.classes.rnn.CellParamsBase[]" }, - { "name": "has_biases", "type": "boolean" }, - { "name": "num_layers", "type": "int64" }, - { "name": "dropout", "type": "float32" }, - { "name": "train", "type": "boolean" }, - { "name": "bidirectional", "type": "boolean" }, - { "name": "batch_first", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "category": "Layer" }, { "name": "aten::quantized_gru.input_legacy(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "hx", "type": "Tensor" }, - { "name": "params", "type": "Tensor[]" }, - { "name": "has_biases", "type": "boolean" }, - { "name": "num_layers", "type": "int64" }, - { "name": "dropout", "type": "float32" }, - { "name": "train", "type": "boolean" }, - { "name": "bidirectional", "type": "boolean" }, - { "name": "batch_first", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "category": "Layer" }, { "name": "aten::quantized_lstm.data(Tensor data, Tensor batch_sizes, Tensor[] hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, ScalarType? 
dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "data", "type": "Tensor" }, - { "name": "batch_sizes", "type": "Tensor" }, - { "name": "hx", "type": "Tensor[]" }, - { "name": "params", "type": "__torch__.torch.classes.rnn.CellParamsBase[]" }, - { "name": "has_biases", "type": "boolean" }, - { "name": "num_layers", "type": "int64" }, - { "name": "dropout", "type": "float32" }, - { "name": "train", "type": "boolean" }, - { "name": "bidirectional", "type": "boolean" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "use_dynamic", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "category": "Layer" }, { "name": "aten::quantized_lstm.data_legacy(Tensor data, Tensor batch_sizes, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "data", "type": "Tensor" }, - { "name": "batch_sizes", "type": "Tensor" }, - { "name": "hx", "type": "Tensor[]" }, - { "name": "params", "type": "Tensor[]" }, - { "name": "has_biases", "type": "boolean" }, - { "name": "num_layers", "type": "int64" }, - { "name": "dropout", "type": "float32" }, - { "name": "train", "type": "boolean" }, - { "name": "bidirectional", "type": "boolean" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "use_dynamic", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "category": "Layer" }, { "name": "aten::quantized_lstm.input(Tensor input, Tensor[] hx, __torch__.torch.classes.rnn.CellParamsBase[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "hx", "type": "Tensor[]" }, - { "name": "params", "type": "__torch__.torch.classes.rnn.CellParamsBase[]" }, - { "name": "has_biases", "type": "boolean" }, - { "name": "num_layers", "type": "int64" }, - { "name": "dropout", "type": "float32" }, - { "name": "train", "type": "boolean" }, - { "name": "bidirectional", "type": "boolean" }, - { "name": "batch_first", "type": "boolean" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "use_dynamic", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "category": "Layer" }, { "name": "aten::quantized_lstm.input_legacy(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, ScalarType? 
dtype=None, bool use_dynamic=False) -> (Tensor, Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "hx", "type": "Tensor[]" }, - { "name": "params", "type": "Tensor[]" }, - { "name": "has_biases", "type": "boolean" }, - { "name": "num_layers", "type": "int64" }, - { "name": "dropout", "type": "float32" }, - { "name": "train", "type": "boolean" }, - { "name": "bidirectional", "type": "boolean" }, - { "name": "batch_first", "type": "boolean" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "use_dynamic", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "hx", "type": "Tensor[]" }, - { "name": "w_ih", "type": "Tensor" }, - { "name": "w_hh", "type": "Tensor" }, - { "name": "b_ih", "type": "Tensor" }, - { "name": "b_hh", "type": "Tensor" }, - { "name": "packed_ih", "type": "Tensor" }, - { "name": "packed_hh", "type": "Tensor" }, - { "name": "col_offsets_ih", "type": "Tensor" }, - { "name": "col_offsets_hh", "type": "Tensor" }, - { "name": "scale_ih", "type": "Scalar" }, - { "name": "scale_hh", "type": "Scalar" }, - { "name": "zero_point_ih", "type": "Scalar" }, - { "name": "zero_point_hh", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::quantized_lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh, Tensor packed_ih, Tensor packed_hh, Tensor col_offsets_ih, Tensor col_offsets_hh, Scalar scale_ih, Scalar scale_hh, Scalar zero_point_ih, Scalar zero_point_hh) -> (Tensor, Tensor)" }, { - "name": "aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "size", "type": "SymInt[]" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::rand(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", - "inputs": [ - { "name": "size", "type": "SymInt[]", "default": null }, - { "name": "generator", "type": "Generator?", "kwarg_only": true }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::rand.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "size", "type": "SymInt[]" }, - { "name": "generator", "type": "Generator?", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::rand.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "size", "type": "SymInt[]" }, - { "name": "generator", "type": "Generator?", "kwarg_only": true }, - { "name": "names", "type": "Dimname[]?", "kwarg_only": true }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::rand.generator_with_names(SymInt[] size, *, Generator? generator, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::rand.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "size", "type": "SymInt[]" }, - { "name": "names", "type": "Dimname[]?", "kwarg_only": true }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::rand.generator_with_names_out(SymInt[] size, *, Generator? generator, str[]? names, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "size", "type": "SymInt[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::rand.names(SymInt[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? 
memory_format=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::rand.names_out(SymInt[] size, *, str[]? names, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "high", "type": "SymInt" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "dtype", "type": "ScalarType?", "default": "long", "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::rand.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "high", "type": "SymInt" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "generator", "type": "Generator?", "kwarg_only": true }, - { "name": "dtype", "type": "ScalarType?", "default": "long", "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::rand_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor" }, { - "name": "aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "high", "type": "SymInt" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "generator", "type": "Generator?", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::rand_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "low", "type": "SymInt", "default": null }, - { "name": "high", "type": "SymInt", "default": null }, - { "name": "size", "type": "SymInt[]" }, - { "name": "dtype", "type": "ScalarType?", "default": "long", "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randint(SymInt high, SymInt[] size, *, ScalarType? dtype=4, Layout? layout=None, Device? 
device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "low", "type": "SymInt", "default": null }, - { "name": "high", "type": "SymInt", "default": null }, - { "name": "size", "type": "SymInt[]" }, - { "name": "generator", "type": "Generator?", "kwarg_only": true }, - { "name": "dtype", "type": "ScalarType?", "default": "long", "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randint.generator(SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "low", "type": "SymInt" }, - { "name": "high", "type": "SymInt" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "generator", "type": "Generator?", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randint.generator_out(SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "low", "type": "SymInt" }, - { "name": "high", "type": "SymInt" }, - { "name": "size", "type": "SymInt[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randint.low(SymInt low, SymInt high, SymInt[] size, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "high", "type": "SymInt" }, - { "name": "size", "type": "SymInt[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randint.low_generator(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "high", "type": "SymInt" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randint.low_generator_out(SymInt low, SymInt high, SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "low", "type": "SymInt" }, - { "name": "high", "type": "SymInt" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randint.low_out(SymInt low, SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "size", "type": "SymInt[]" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randint.out(SymInt high, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "size", "type": "SymInt[]" }, - { "name": "generator", "type": "Generator?", "kwarg_only": true }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randint_like(Tensor self, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor" }, { - "name": "aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "size", "type": "SymInt[]" }, - { "name": "generator", "type": "Generator?", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randint_like.low_dtype(Tensor self, SymInt low, SymInt high, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor" }, { - "name": "aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", - "inputs": [ - { "name": "size", "type": "SymInt[]" }, - { "name": "generator", "type": "Generator?", "kwarg_only": true }, - { "name": "names", "type": "Dimname[]?", "kwarg_only": true }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randint_like.low_dtype_out(Tensor self, SymInt low, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::randn.names(SymInt[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "size", "type": "SymInt[]" }, - { "name": "names", "type": "Dimname[]?", "kwarg_only": true }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randint_like.out(Tensor self, SymInt high, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "size", "type": "SymInt[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randn(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randn.generator(SymInt[] size, *, Generator? generator, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::random_(Tensor(a!) self, *, Generator? generator=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randn.generator_out(SymInt[] size, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "from", "type": "int64" }, - { "name": "to", "type": "int64?" 
}, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randn.generator_with_names(SymInt[] size, *, Generator? generator, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "to", "type": "int64" }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randn.generator_with_names_out(SymInt[] size, *, Generator? generator, str[]? names, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::randperm(SymInt n, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "n", "type": "SymInt" }, - { "name": "dtype", "type": "ScalarType?", "default": "long", "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randn.names(SymInt[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "n", "type": "SymInt" }, - { "name": "generator", "type": "Generator?", "kwarg_only": true }, - { "name": "dtype", "type": "ScalarType?", "default": "long", "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randn.names_out(SymInt[] size, *, str[]? names, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "n", "type": "SymInt" }, - { "name": "generator", "type": "Generator?", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randn.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "n", "type": "SymInt" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randn_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor" }, { - "name": "aten::real(Tensor(a) self) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::randn_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::reciprocal(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::random_(Tensor(a!) self, *, Generator? 
generator=None) -> Tensor(a!)" }, { - "name": "aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::random_.from(Tensor(a!) self, int from, int? to, *, Generator? generator=None) -> Tensor(a!)" + }, + { + "name": "aten::random_.to(Tensor(a!) self, int to, *, Generator? generator=None) -> Tensor(a!)" + }, + { + "name": "aten::randperm(SymInt n, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" + }, + { + "name": "aten::randperm.generator(SymInt n, *, Generator? generator, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" + }, + { + "name": "aten::randperm.generator_out(SymInt n, *, Generator? generator, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::randperm.out(SymInt n, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::real(Tensor(a) self) -> Tensor(a)" + }, + { + "name": "aten::reciprocal(Tensor self) -> Tensor" + }, + { + "name": "aten::reciprocal.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::reflection_pad1d(Tensor self, SymInt[2] padding) -> Tensor", - "category": "Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "padding", "type": "SymInt[2]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Tensor" }, { - "name": "aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "padding", "type": "SymInt[2]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::reflection_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::reflection_pad2d(Tensor self, SymInt[4] padding) -> Tensor", - "category": "Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "padding", "type": "SymInt[4]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Tensor" }, { - "name": "aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "padding", "type": "SymInt[4]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::reflection_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::reflection_pad3d(Tensor self, SymInt[6] padding) -> Tensor", - "category": "Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "padding", "type": "SymInt[6]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Tensor" }, { - "name": "aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "padding", "type": "SymInt[6]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::reflection_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::relu(Tensor self) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" + }, + { + "name": "aten::relu.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)" }, { "name": "aten::relu6(Tensor self) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" + }, + { + "name": "aten::relu6_(Tensor(a!) self) -> Tensor(a!)", + "category": "Activation" + }, + { + "name": "aten::relu_(Tensor(a!) self) -> Tensor(a!)", + "category": "Activation" }, { - "name": "aten::relu6_(Tensor(a!) self) -> Tensor(a!)", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::remainder(Scalar a, Scalar b) -> Scalar" }, { - "name": "aten::relu_(Tensor(a!) self) -> Tensor(a!)", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::remainder(Scalar a, Scalar b) -> Scalar", - "inputs": [ - { "name": "a", "type": "Scalar" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "type": "Scalar" } - ] + "name": "aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor" }, { - "name": "aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Scalar" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Scalar" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::remainder.float(float a, float b) -> float" }, { - "name": "aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::remainder.float_int(float a, int b) -> float" }, { - "name": "aten::remainder.float(float a, float b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::remainder.int(int a, int b) -> int" }, { - "name": "aten::remainder.float_int(float a, int b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::remainder.int_float(int a, float b) -> float" }, { - "name": "aten::remainder.int(int a, int b) -> int", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)" }, { - "name": "aten::remainder.int_float(int a, float b) -> float", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::remainder_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor" }, { - "name": "aten::remainder_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::renorm(Tensor self, Scalar p, int dim, Scalar maxnorm) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "Scalar" }, - { "name": "dim", "type": "int64" }, - { "name": "maxnorm", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::repeat(Tensor self, SymInt[] repeats) -> Tensor" }, { - "name": "aten::renorm.out(Tensor self, Scalar p, int dim, Scalar maxnorm, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "p", "type": "Scalar" }, - { "name": "dim", "type": "int64" }, - { "name": "maxnorm", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::repeat.out(Tensor self, SymInt[] repeats, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::repeat(Tensor self, SymInt[] repeats) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "repeats", "type": "SymInt[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor" }, { - "name": "aten::repeat_interleave.Tensor(Tensor repeats, *, SymInt? output_size=None) -> Tensor", - "inputs": [ - { "name": "repeats", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::repeat_interleave.Tensor_out(Tensor repeats, *, SymInt? output_size=None, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "repeats", "type": "Tensor" }, - { "name": "dim", "type": "int64?", "default": null }, - { "name": "output_size", "type": "SymInt?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::repeat_interleave.self_Tensor(Tensor self, Tensor repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor" }, { - "name": "aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "repeats", "type": "SymInt" }, - { "name": "dim", "type": "int64?", "default": null }, - { "name": "output_size", "type": "SymInt?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::repeat_interleave.self_int(Tensor self, SymInt repeats, int? dim=None, *, SymInt? output_size=None) -> Tensor" }, { - "name": "aten::replace(str self, str old, str new, int max=-1) -> str", - "inputs": [ - { "name": "self", "type": "string" }, - { "name": "old", "type": "string" }, - { "name": "new", "type": "string" }, - { "name": "max", "type": "int64", "default": -1 } - ], - "outputs": [ - { "type": "string" } - ] + "name": "aten::replace(str self, str old, str new, int max=-1) -> str" }, { "name": "aten::replication_pad1d(Tensor self, SymInt[2] padding) -> Tensor", - "category": "Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "padding", "type": "SymInt[2]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Tensor" }, { - "name": "aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "padding", "type": "SymInt[2]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::replication_pad1d.out(Tensor self, SymInt[2] padding, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::replication_pad2d(Tensor self, SymInt[4] padding) -> Tensor", - "category": "Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "padding", "type": "SymInt[4]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Tensor" }, { - "name": "aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "padding", "type": "SymInt[4]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::replication_pad2d.out(Tensor self, SymInt[4] padding, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::replication_pad3d(Tensor self, SymInt[6] padding) -> Tensor", - "category": "Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "padding", "type": "SymInt[6]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Tensor" }, { - "name": "aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "padding", "type": "SymInt[6]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::replication_pad3d.out(Tensor self, SymInt[6] padding, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::requires_grad_(Tensor(a!) 
self, bool requires_grad=True) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "requires_grad", "type": "boolean", "default": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::requires_grad_(Tensor(a!) self, bool requires_grad=True) -> Tensor(a!)" }, { "name": "aten::reshape(Tensor(a) self, SymInt[] shape) -> Tensor(a)", - "category": "Shape", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "shape", "type": "SymInt[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Shape" }, { "name": "aten::reshape_as(Tensor(a) self, Tensor other) -> Tensor(a)", - "category": "Shape", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Shape" }, { - "name": "aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "size", "type": "SymInt[]" }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::resize_(Tensor(a!) self, SymInt[] size, *, MemoryFormat? memory_format=None) -> Tensor(a!)" }, { - "name": "aten::resolve_conj(Tensor(a) self) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::resolve_conj(Tensor(a) self) -> Tensor(a)" }, { - "name": "aten::resolve_neg(Tensor(a) self) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::resolve_neg(Tensor(a) self) -> Tensor(a)" }, { - "name": "aten::reverse.t(t[](a!) self) -> ()", - "inputs": [ - { "name": "self", "type": "t[]" } - ], - "outputs": [] + "name": "aten::reverse.t(t[](a!) 
self) -> ()" }, { - "name": "aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", - "inputs": [ - { "name": "data", "type": "Tensor" }, - { "name": "batch_sizes", "type": "Tensor" }, - { "name": "hx", "type": "Tensor" }, - { "name": "params", "type": "Tensor[]" }, - { "name": "has_biases", "type": "boolean" }, - { "name": "num_layers", "type": "int64" }, - { "name": "dropout", "type": "float32" }, - { "name": "train", "type": "boolean" }, - { "name": "bidirectional", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::rnn_relu.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)" }, { "name": "aten::rnn_relu.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "hx", "type": "Tensor" }, - { "name": "params", "type": "Tensor[]" }, - { "name": "has_biases", "type": "boolean" }, - { "name": "num_layers", "type": "int64" }, - { "name": "dropout", "type": "float32" }, - { "name": "train", "type": "boolean" }, - { "name": "bidirectional", "type": "boolean" }, - { "name": "batch_first", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" }, - { "name": "?", "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)", - "inputs": [ - { "name": "data", "type": "Tensor" }, - { "name": "batch_sizes", "type": "Tensor" }, - { "name": "hx", "type": "Tensor" }, - { "name": "params", "type": "Tensor[]" }, - { "name": "has_biases", "type": "boolean" }, - { "name": "num_layers", "type": "int64" }, - { "name": "dropout", "type": "float32" }, - { "name": "train", "type": "boolean" }, - { "name": "bidirectional", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::rnn_tanh.data(Tensor data, Tensor batch_sizes, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional) -> (Tensor, Tensor)" }, { "name": "aten::rnn_tanh.input(Tensor input, Tensor hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "hx", "type": "Tensor" }, - { "name": "params", "type": "Tensor[]" }, - { "name": "has_biases", "type": "boolean" }, - { "name": "num_layers", "type": "int64" }, - { "name": "dropout", "type": "float32" }, - { "name": "train", "type": "boolean" }, - { "name": "bidirectional", "type": "boolean" }, - { "name": "batch_first", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" }, - { "name": "?", "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? 
b_hh=None) -> Tensor", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "hx", "type": "Tensor" }, - { "name": "w_ih", "type": "Tensor" }, - { "name": "w_hh", "type": "Tensor" }, - { "name": "b_ih", "type": "Tensor?", "default": null }, - { "name": "b_hh", "type": "Tensor?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::rnn_tanh_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? b_hh=None) -> Tensor" }, { "name": "aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "shifts", "type": "SymInt[1]" }, - { "name": "dims", "type": "int64[1]", "default": [] } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "aten::rot90(Tensor self, int k=1, int[] dims=[0,1]) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "k", "type": "int64", "default": 1 }, - { "name": "dims", "type": "int64[]", "default": [ 0, 1 ] } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::round(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::rot90(Tensor self, int k=1, int[] dims=[0, 1]) -> Tensor" }, { - "name": "aten::round.decimals(Tensor self, *, int decimals) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "decimals", "type": "int64", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::rot90.out(Tensor self, int k=1, int[] dims=[0, 1], *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "decimals", "type": "int64", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::round(Tensor self) -> Tensor" }, { - "name": "aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::round.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::round_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::round.decimals(Tensor self, *, int decimals) -> Tensor" }, { - "name": "aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "decimals", "type": "int64", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::round.decimals_out(Tensor self, *, int decimals, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? 
generator=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "lower", "type": "Scalar", "default": 0.125 }, - { "name": "upper", "type": "Scalar", "default": 0.3333333333333333 }, - { "name": "training", "type": "boolean", "default": false }, - { "name": "generator", "type": "Generator?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::round.float(float a) -> float" }, { - "name": "aten::rsqrt(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::round.int(int a) -> float" }, { - "name": "aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::round.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::round_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" }, - { "name": "alpha", "type": "Scalar", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::round_.decimals(Tensor(a!) self, *, int decimals) -> Tensor(a!)" }, { - "name": "aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.33333333333333331, bool training=False, Generator? generator=None) -> Tensor" }, { - "name": "aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "s", "type": "Scalar" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::rsqrt(Tensor self) -> Tensor" + }, + { + "name": "aten::rsqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::rsqrt_(Tensor(a!) self) -> Tensor(a!)" + }, + { + "name": "aten::rsub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor" + }, + { + "name": "aten::rsub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::rsub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor" + }, + { + "name": "aten::rsub.Tensor_out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::scalar_tensor(Scalar s, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" + }, + { + "name": "aten::scalar_tensor.out(Scalar s, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0., bool is_causal=False, *, float? 
scale=None, bool enable_gqa=False) -> Tensor", + "category": "Attention" + }, + { + "name": "aten::scatter.dimname_src(Tensor self, str dim, Tensor index, Tensor src) -> Tensor" + }, + { + "name": "aten::scatter.dimname_value(Tensor self, str dim, Tensor index, Scalar value) -> Tensor" }, { - "name": "aten::scaled_dot_product_attention(Tensor query, Tensor key, Tensor value, Tensor? attn_mask=None, float dropout_p=0.0, bool is_causal=False, *, float? scale=None, bool enable_gqa=False) -> Tensor", - "category": "Attention", - "inputs": [ - { "name": "query", "type": "Tensor" }, - { "name": "key", "type": "Tensor" }, - { "name": "value", "type": "Tensor" }, - { "name": "attn_mask", "type": "Tensor?", "default": null }, - { "name": "dropout_p", "type": "float32", "default": 0.0 }, - { "name": "is_causal", "type": "boolean", "default": false }, - { "name": "scale", "type": "float32?", "default": null, "kwarg_only": true }, - { "name": "enable_gqa", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor" }, { - "name": "aten::scatter.dimname_src(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "index", "type": "Tensor" }, - { "name": "src", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::scatter.dimname_value(Tensor self, Dimname dim, Tensor index, Scalar value) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "index", "type": "Tensor" }, - { "name": "value", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor" }, { - "name": "aten::scatter.reduce(Tensor self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "src", "type": "Tensor" }, - { "name": "reduce", "type": "string", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::scatter.reduce_out(Tensor self, int dim, Tensor index, Tensor src, *, str reduce, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "src", "type": "Tensor" }, - { "name": "reduce", "type": "string", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor" }, { - "name": "aten::scatter.src(Tensor self, int dim, Tensor index, Tensor src) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "src", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::scatter.src_out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "src", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor" }, { - "name": "aten::scatter.value(Tensor self, int dim, Tensor index, Scalar value) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "value", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::scatter.value_out(Tensor self, int dim, Tensor index, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "value", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)" }, { - "name": "aten::scatter.value_reduce(Tensor self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "value", "type": "Scalar" }, - { "name": "reduce", "type": "string", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)" }, { - "name": "aten::scatter.value_reduce_out(Tensor self, int dim, Tensor index, Scalar value, *, str reduce, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "value", "type": "Scalar" }, - { "name": "reduce", "type": "string", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter_.value(Tensor(a!) self, int dim, Tensor index, Scalar value) -> Tensor(a!)" }, { - "name": "aten::scatter_.reduce(Tensor(a!) self, int dim, Tensor index, Tensor src, *, str reduce) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "src", "type": "Tensor" }, - { "name": "reduce", "type": "string", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)" }, { - "name": "aten::scatter_.src(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "src", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor" }, { - "name": "aten::scatter_.value(Tensor(a!) 
self, int dim, Tensor index, Scalar value) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "value", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter_add.dimname(Tensor self, str dim, Tensor index, Tensor src) -> Tensor" }, { - "name": "aten::scatter_.value_reduce(Tensor(a!) self, int dim, Tensor index, Scalar value, *, str reduce) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "value", "type": "Scalar" }, - { "name": "reduce", "type": "string", "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::scatter_add(Tensor self, int dim, Tensor index, Tensor src) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "src", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)" }, { - "name": "aten::scatter_add.dimname(Tensor self, Dimname dim, Tensor index, Tensor src) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "index", "type": "Tensor" }, - { "name": "src", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor" }, { - "name": "aten::scatter_add.out(Tensor self, int dim, Tensor index, Tensor src, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "src", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::scatter_add_(Tensor(a!) self, int dim, Tensor index, Tensor src) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "src", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)" }, { - "name": "aten::scatter_reduce.two(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "src", "type": "Tensor" }, - { "name": "reduce", "type": "string" }, - { "name": "include_self", "type": "boolean", "default": true, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor" }, { - "name": "aten::scatter_reduce.two_out(Tensor self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "src", "type": "Tensor" }, - { "name": "reduce", "type": "string" }, - { "name": "include_self", "type": "boolean", "default": true, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::scatter_reduce_.two(Tensor(a!) self, int dim, Tensor index, Tensor src, str reduce, *, bool include_self=True) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "Tensor" }, - { "name": "src", "type": "Tensor" }, - { "name": "reduce", "type": "string" }, - { "name": "include_self", "type": "boolean", "default": true, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor" }, { - "name": "aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor", - "inputs": [ - { "name": "sorted_sequence", "type": "Tensor" }, - { "name": "self", "type": "Scalar" }, - { "name": "out_int32", "type": "boolean", "default": false, "kwarg_only": true }, - { "name": "right", "type": "boolean", "default": false, "kwarg_only": true }, - { "name": "side", "type": "string?", "default": null, "kwarg_only": true }, - { "name": "sorter", "type": "Tensor?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "sorted_sequence", "type": "Tensor" }, - { "name": "self", "type": "Scalar" }, - { "name": "out_int32", "type": "boolean", "default": false, "kwarg_only": true }, - { "name": "right", "type": "boolean", "default": false, "kwarg_only": true }, - { "name": "side", "type": "string?", "default": null, "kwarg_only": true }, - { "name": "sorter", "type": "Tensor?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::select.Dimname(Tensor(a) self, str dim, int index) -> Tensor(a)" }, { - "name": "aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? 
sorter=None) -> Tensor", - "inputs": [ - { "name": "sorted_sequence", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "out_int32", "type": "boolean", "default": false, "kwarg_only": true }, - { "name": "right", "type": "boolean", "default": false, "kwarg_only": true }, - { "name": "side", "type": "string?", "default": null, "kwarg_only": true }, - { "name": "sorter", "type": "Tensor?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)" }, { - "name": "aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "sorted_sequence", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "out_int32", "type": "boolean", "default": false, "kwarg_only": true }, - { "name": "right", "type": "boolean", "default": false, "kwarg_only": true }, - { "name": "side", "type": "string?", "default": null, "kwarg_only": true }, - { "name": "sorter", "type": "Tensor?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::select.t(t[](a) list, int idx) -> t(*)" }, { - "name": "aten::select.Dimname(Tensor(a) self, Dimname dim, int index) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "index", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor" }, { - "name": "aten::select.int(Tensor(a) self, int dim, SymInt index) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "SymInt" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::select_backward.out(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::select.t(t[](a) list, int idx) -> t(*)", - "inputs": [ - { "name": "list", "type": "t[]" }, - { "name": "idx", "type": "int64" } - ], - "outputs": [ - { "type": "t" } - ] + "name": "aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor" }, { - "name": "aten::select_backward(Tensor grad_output, SymInt[] input_sizes, int dim, SymInt index) -> Tensor", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "input_sizes", "type": "SymInt[]" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "SymInt" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::select_copy.int_out(Tensor self, int dim, SymInt index, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::select_copy.int(Tensor self, int dim, SymInt index) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "SymInt" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor" }, { - "name": "aten::select_scatter(Tensor self, Tensor src, int dim, SymInt index) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "src", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "index", "type": "SymInt" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::select_scatter.out(Tensor self, Tensor src, int dim, SymInt index, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::selu(Tensor self) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" }, { "name": "aten::selu_(Tensor(a!) self) -> Tensor(a!)", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" }, { "name": "aten::sigmoid(Tensor self) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" }, { - "name": "aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sigmoid.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::sigmoid_(Tensor(a!) self) -> Tensor(a!)", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" }, { - "name": "aten::sign(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sign(Tensor self) -> Tensor" }, { - "name": "aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sign.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::sign_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sign_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::signbit(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::signbit(Tensor self) -> Tensor" }, { - "name": "aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::signbit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::silu(Tensor self) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" }, { - "name": "aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::silu.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::silu_(Tensor(a!) 
self) -> Tensor(a!)", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" }, { - "name": "aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::silu_backward(Tensor grad_output, Tensor self) -> Tensor" }, { - "name": "aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::silu_backward.grad_input(Tensor grad_output, Tensor self, *, Tensor(a!) grad_input) -> Tensor(a!)" }, { - "name": "aten::sin(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sin(Tensor self) -> Tensor" }, { - "name": "aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sin.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::sinh(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sin.complex(complex a) -> complex" }, { - "name": "aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sin.float(float a) -> float" }, { - "name": "aten::size(Tensor self) -> int[]", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "int64[]" } - ] + "name": "aten::sin.int(int a) -> float" }, { - "name": "aten::size.Dimname(Tensor self, Dimname dim) -> int", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::sin.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::size.int(Tensor self, int dim) -> int", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::sinh(Tensor self) -> Tensor" + }, + { + "name": "aten::sinh.Scalar(Scalar a) -> Scalar" + }, + { + "name": "aten::sinh.complex(complex a) -> complex" + }, + { + "name": "aten::sinh.float(float a) -> float" + }, + { + "name": "aten::sinh.int(int a) -> float" + }, + { + "name": "aten::sinh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::size(Tensor self) -> int[]" + }, + { + "name": "aten::size.Dimname(Tensor self, str dim) -> int" + }, + { + "name": "aten::size.int(Tensor self, int dim) -> int" }, { "name": "aten::slice.Tensor(Tensor(a) self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor(a)", - "category": "Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64", "default": 0 }, - { "name": "start", "type": "SymInt?", "default": null }, - { "name": "end", "type": "SymInt?", "default": null }, - { "name": "step", "type": "SymInt", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Tensor" }, { - "name": "aten::slice.str(str string, int? start=None, int? 
end=None, int step=1) -> str", - "inputs": [ - { "name": "string", "type": "string" }, - { "name": "start", "type": "int64?", "default": null }, - { "name": "end", "type": "int64?", "default": null }, - { "name": "step", "type": "int64", "default": 1 } - ], - "outputs": [ - { "type": "string" } - ] + "name": "aten::slice.str(str string, int? start=None, int? end=None, int step=1) -> str" + }, + { + "name": "aten::slice.t(t[] l, int? start=None, int? end=None, int step=1) -> t[]" + }, + { + "name": "aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor" }, { - "name": "aten::slice.t(t[] l, int? start=None, int? end=None, int step=1) -> t[]", - "inputs": [ - { "name": "l", "type": "t[]" }, - { "name": "start", "type": "int64?", "default": null }, - { "name": "end", "type": "int64?", "default": null }, - { "name": "step", "type": "int64", "default": 1 } - ], - "outputs": [ - { "type": "t[]" } - ] + "name": "aten::slice_copy.Tensor_out(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::slice_copy.Tensor(Tensor self, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64", "default": 0 }, - { "name": "start", "type": "SymInt?", "default": null }, - { "name": "end", "type": "SymInt?", "default": null }, - { "name": "step", "type": "SymInt", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor" }, { - "name": "aten::slice_scatter(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "src", "type": "Tensor" }, - { "name": "dim", "type": "int64", "default": 0 }, - { "name": "start", "type": "SymInt?", "default": null }, - { "name": "end", "type": "SymInt?", "default": null }, - { "name": "step", "type": "SymInt", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::slice_scatter.out(Tensor self, Tensor src, int dim=0, SymInt? start=None, SymInt? end=None, SymInt step=1, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=Mean, float beta=1.0) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "reduction", "type": "int64", "default": "Mean" }, - { "name": "beta", "type": "float32", "default": 1.0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::smooth_l1_loss(Tensor self, Tensor target, int reduction=1, float beta=1.) -> Tensor" }, { - "name": "aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=Mean, float beta=1.0, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "reduction", "type": "int64", "default": "Mean" }, - { "name": "beta", "type": "float32", "default": 1.0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::smooth_l1_loss.out(Tensor self, Tensor target, int reduction=1, float beta=1., *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "reduction", "type": "int64" }, - { "name": "beta", "type": "float32" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::smooth_l1_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta) -> Tensor" }, { - "name": "aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "target", "type": "Tensor" }, - { "name": "reduction", "type": "int64" }, - { "name": "beta", "type": "float32" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::smooth_l1_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, float beta, *, Tensor(a!) grad_input) -> Tensor(a!)" }, { - "name": "aten::softmax.Dimname(Tensor self, Dimname dim, *, ScalarType? dtype=None) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::softmax.Dimname(Tensor self, str dim, *, ScalarType? dtype=None) -> Tensor", + "category": "Activation" }, { "name": "aten::softmax.int(Tensor self, int dim, ScalarType? dtype=None) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "dtype", "type": "ScalarType?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" }, { - "name": "aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "dtype", "type": "ScalarType?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::softmax.int_out(Tensor self, int dim, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::softplus(Tensor self, Scalar beta=1, Scalar threshold=20) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "beta", "type": "Scalar", "default": 1 }, - { "name": "threshold", "type": "Scalar", "default": 20 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" }, { - "name": "aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "beta", "type": "Scalar", "default": 1 }, - { "name": "threshold", "type": "Scalar", "default": 20 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::softplus.out(Tensor self, Scalar beta=1, Scalar threshold=20, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "lambd", "type": "Scalar", "default": 0.5 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::softshrink(Tensor self, Scalar lambd=0.5) -> Tensor" }, { - "name": "aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "lambd", "type": "Scalar", "default": 0.5 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::softshrink.out(Tensor self, Scalar lambd=0.5, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "descending", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::sort(Tensor self, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)" }, { - "name": "aten::sort.Tensor(Tensor[](a!) self, bool reverse=False) -> ()", - "inputs": [ - { "name": "self", "type": "Tensor[]" }, - { "name": "reverse", "type": "boolean", "default": false } - ], - "outputs": [] + "name": "aten::sort.Tensor(Tensor[](a!) self, bool reverse=False) -> ()" }, { - "name": "aten::sort.any(t[](a!) self, bool reverse=False) -> ()", - "inputs": [ - { "name": "self", "type": "t[]" }, - { "name": "reverse", "type": "boolean", "default": false } - ], - "outputs": [] + "name": "aten::sort.any(t[](a!) self, bool reverse=False) -> ()" }, { - "name": "aten::sort.bool(bool[](a!) self, bool reverse=False) -> ()", - "inputs": [ - { "name": "self", "type": "boolean[]" }, - { "name": "reverse", "type": "boolean", "default": false } - ], - "outputs": [] + "name": "aten::sort.bool(bool[](a!) self, bool reverse=False) -> ()" }, { - "name": "aten::sort.dimname(Tensor self, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "descending", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::sort.dimname(Tensor self, str dim, bool descending=False) -> (Tensor values, Tensor indices)" }, { - "name": "aten::sort.dimname_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False) -> (Tensor values, Tensor indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "stable", "type": "boolean?", "kwarg_only": true }, - { "name": "dim", "type": "Dimname", "kwarg_only": true }, - { "name": "descending", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::sort.dimname_stable(Tensor self, *, bool? stable, str dim, bool descending=False) -> (Tensor values, Tensor indices)" }, { - "name": "aten::sort.dimname_values(Tensor self, Dimname dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "descending", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::sort.dimname_values(Tensor self, str dim, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)" }, { - "name": "aten::sort.dimname_values_stable(Tensor self, *, bool? stable, Dimname dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "stable", "type": "boolean?", "kwarg_only": true }, - { "name": "dim", "type": "Dimname", "kwarg_only": true }, - { "name": "descending", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::sort.dimname_values_stable(Tensor self, *, bool? stable, str dim, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)" }, { - "name": "aten::sort.float(float[](a!) self, bool reverse=False) -> ()", - "inputs": [ - { "name": "self", "type": "float32[]" }, - { "name": "reverse", "type": "boolean", "default": false } - ], - "outputs": [] + "name": "aten::sort.float(float[](a!) self, bool reverse=False) -> ()" }, { - "name": "aten::sort.int(int[](a!) self, bool reverse=False) -> ()", - "inputs": [ - { "name": "self", "type": "int64[]" }, - { "name": "reverse", "type": "boolean", "default": false } - ], - "outputs": [] + "name": "aten::sort.int(int[](a!) self, bool reverse=False) -> ()" }, { - "name": "aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "stable", "type": "boolean?", "kwarg_only": true }, - { "name": "dim", "type": "int64", "default": -1, "kwarg_only": true }, - { "name": "descending", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::sort.stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False) -> (Tensor values, Tensor indices)" }, { - "name": "aten::sort.str(str[](a!) self, bool reverse=False) -> ()", - "inputs": [ - { "name": "self", "type": "string[]" }, - { "name": "reverse", "type": "boolean", "default": false } - ], - "outputs": [] + "name": "aten::sort.str(str[](a!) self, bool reverse=False) -> ()" }, { - "name": "aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "descending", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::sort.values(Tensor self, int dim=-1, bool descending=False, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)" }, { - "name": "aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) 
indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "stable", "type": "boolean?", "kwarg_only": true }, - { "name": "dim", "type": "int64", "default": -1, "kwarg_only": true }, - { "name": "descending", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::sort.values_stable(Tensor self, *, bool? stable, int dim=-1, bool descending=False, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)" }, { - "name": "aten::special_expit(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::special_expit(Tensor self) -> Tensor" }, { - "name": "aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::special_expit.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::split(Tensor(a -> *) self, int[] split_sizes, int dim=0) -> Tensor(a)[]", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "split_sizes", "type": "int64[]" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::split(Tensor(a -> *) self, int[] split_sizes, int dim=0) -> Tensor(a)[]" }, { "name": "aten::split.Tensor(Tensor(a -> *) self, SymInt split_size, int dim=0) -> Tensor(a)[]", - "category": "Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "split_size", "type": "SymInt" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "name": "outputs", "type": "Tensor[]" } - ] + "category": "Tensor" }, { "name": "aten::split.sizes(Tensor(a -> *) self, SymInt[] split_size, int dim=0) -> Tensor(a)[]", - "category": "Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "split_size", "type": "SymInt[]" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "name": "outputs", "type": "Tensor[]" } - ] + "category": "Tensor" }, { - "name": "aten::split.str(str self, str? separator=None, int max=-1) -> str[]", - "inputs": [ - { "name": "self", "type": "string" }, - { "name": "separator", "type": "string?", "default": null }, - { "name": "max", "type": "int64", "default": -1 } - ], - "outputs": [ - { "type": "string[]" } - ] + "name": "aten::split.str(str self, str? 
separator=None, int max=-1) -> str[]" }, { - "name": "aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "split_size", "type": "SymInt" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::split_copy.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]" }, { - "name": "aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "split_size", "type": "SymInt" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [] + "name": "aten::split_copy.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()" }, { "name": "aten::split_with_sizes(Tensor(a -> *) self, SymInt[] split_sizes, int dim=0) -> Tensor(a)[]", - "category": "Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "split_sizes", "type": "SymInt[]" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "name": "outputs", "type": "Tensor[]" } - ] + "category": "Tensor" }, { - "name": "aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "split_sizes", "type": "SymInt[]" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::split_with_sizes_copy(Tensor self, SymInt[] split_sizes, int dim=0) -> Tensor[]" }, { - "name": "aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "split_sizes", "type": "SymInt[]" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [] + "name": "aten::split_with_sizes_copy.out(Tensor self, SymInt[] split_sizes, int dim=0, *, Tensor(a!)[] out) -> ()" }, { - "name": "aten::splitlines(str self, bool keepends=False) -> str[]", - "inputs": [ - { "name": "self", "type": "string" }, - { "name": "keepends", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "string[]" } - ] + "name": "aten::splitlines(str self, bool keepends=False) -> str[]" }, { - "name": "aten::sqrt(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sqrt(Tensor self) -> Tensor" }, { - "name": "aten::sqrt.Scalar(Scalar a) -> Scalar", - "inputs": [ - { "name": "a", "type": "Scalar" } - ], - "outputs": [ - { "type": "Scalar" } - ] + "name": "aten::sqrt.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::sqrt.complex(complex a) -> complex", - "inputs": [ - { "name": "a", "type": "complex" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::sqrt.complex(complex a) -> complex" }, { - "name": "aten::sqrt.float(float a) -> float", - "inputs": [ - { "name": "a", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::sqrt.float(float a) -> float" }, { - "name": "aten::sqrt.int(int a) -> float", - "inputs": [ - { "name": "a", "type": "int64" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::sqrt.int(int a) -> float" }, { - "name": "aten::sqrt.out(Tensor self, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::sqrt_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sqrt_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::square(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::square(Tensor self) -> Tensor" }, { - "name": "aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::square.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::square_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::square_(Tensor(a!) self) -> Tensor(a!)" }, { "name": "aten::squeeze(Tensor(a) self) -> Tensor(a)", - "category": "Transform", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Transform" }, { "name": "aten::squeeze.dim(Tensor(a) self, int dim) -> Tensor(a)", - "category": "Transform", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Transform" }, { - "name": "aten::squeeze.dimname(Tensor(a) self, Dimname dim) -> Tensor(a)", - "category": "Transform", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::squeeze.dimname(Tensor(a) self, str dim) -> Tensor(a)", + "category": "Transform" }, { "name": "aten::squeeze.dims(Tensor(a) self, int[] dim) -> Tensor(a)", - "category": "Transform", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Transform" }, { "name": "aten::squeeze_(Tensor(a!) self) -> Tensor(a!)", - "category": "Transform", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Transform" }, { "name": "aten::squeeze_.dim(Tensor(a!) self, int dim) -> Tensor(a!)", - "category": "Transform", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Transform" }, { - "name": "aten::squeeze_.dimname(Tensor(a!) self, Dimname dim) -> Tensor(a!)", - "category": "Transform", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::squeeze_.dimname(Tensor(a!) self, str dim) -> Tensor(a!)", + "category": "Transform" }, { - "name": "aten::squeeze_.dims(Tensor(a!) self, int[] dim) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::squeeze_.dims(Tensor(a!) 
self, int[] dim) -> Tensor(a!)" }, { "name": "aten::stack(Tensor[] tensors, int dim=0) -> Tensor", - "category": "Tensor", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Tensor" + }, + { + "name": "aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::std(Tensor self, bool unbiased=True) -> Tensor" + }, + { + "name": "aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor" + }, + { + "name": "aten::std.correction_names(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor" + }, + { + "name": "aten::std.correction_names_out(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor" }, { - "name": "aten::stack.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::std.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor" }, { - "name": "aten::std(Tensor self, bool unbiased=True) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "unbiased", "type": "boolean", "default": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::std.names_out(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::std.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "correction", "type": "Scalar?", "default": null, "kwarg_only": true }, - { "name": "keepdim", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::std.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "correction", "type": "Scalar?", "default": null, "kwarg_only": true }, - { "name": "keepdim", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)" }, { - "name": "aten::std.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "correction", "type": "Scalar?", "default": null, "kwarg_only": true }, - { "name": "keepdim", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::std_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? 
correction=None, bool keepdim=False) -> (Tensor, Tensor)" }, { - "name": "aten::std.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "correction", "type": "Scalar?", "default": null, "kwarg_only": true }, - { "name": "keepdim", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::std_mean.correction_names(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)" }, { - "name": "aten::std.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?" }, - { "name": "unbiased", "type": "boolean", "default": true }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::std_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))" }, { - "name": "aten::std.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "unbiased", "type": "boolean", "default": true }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)" }, { - "name": "aten::std.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "unbiased", "type": "boolean", "default": true }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::std_mean.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)" }, { - "name": "aten::std.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?" }, - { "name": "unbiased", "type": "boolean", "default": true }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor" }, { - "name": "aten::std_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "unbiased", "type": "boolean", "default": true } - ], - "outputs": [ - { "name": "result1", "type": "Tensor" }, - { "name": "result2", "type": "Tensor" } - ] + "name": "aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode=\"reflect\", bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor" }, { - "name": "aten::std_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? 
correction=None, bool keepdim=False) -> (Tensor, Tensor)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "correction", "type": "Scalar?", "default": null, "kwarg_only": true }, - { "name": "keepdim", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::str(t elem) -> str" }, { - "name": "aten::std_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "correction", "type": "Scalar?", "default": null, "kwarg_only": true }, - { "name": "keepdim", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::stride(Tensor self) -> int[]" }, { - "name": "aten::std_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?" }, - { "name": "unbiased", "type": "boolean", "default": true }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "result1", "type": "Tensor" }, - { "name": "result2", "type": "Tensor" } - ] + "name": "aten::stride.Dimname(Tensor self, str dim) -> int" }, { - "name": "aten::std_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "unbiased", "type": "boolean", "default": true }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::stride.int(Tensor self, int dim) -> int" }, { - "name": "aten::stft(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool normalized=False, bool? onesided=None, bool? return_complex=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "n_fft", "type": "int64" }, - { "name": "hop_length", "type": "int64?", "default": null }, - { "name": "win_length", "type": "int64?", "default": null }, - { "name": "window", "type": "Tensor?", "default": null }, - { "name": "normalized", "type": "boolean", "default": false }, - { "name": "onesided", "type": "boolean?", "default": null }, - { "name": "return_complex", "type": "boolean?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::strip(str self, str chars=\" \\n\\t\\f\\v\") -> str" }, { - "name": "aten::stft.center(Tensor self, int n_fft, int? hop_length=None, int? win_length=None, Tensor? window=None, bool center=True, str pad_mode=\"reflect\", bool normalized=False, bool? onesided=None, bool? 
return_complex=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "n_fft", "type": "int64" }, - { "name": "hop_length", "type": "int64?", "default": null }, - { "name": "win_length", "type": "int64?", "default": null }, - { "name": "window", "type": "Tensor?", "default": null }, - { "name": "center", "type": "boolean", "default": true }, - { "name": "pad_mode", "type": "string", "default": "reflect" }, - { "name": "normalized", "type": "boolean", "default": false }, - { "name": "onesided", "type": "boolean?", "default": null }, - { "name": "return_complex", "type": "boolean?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sub(Scalar a, Scalar b) -> Scalar" }, { - "name": "aten::str(t elem) -> str", - "inputs": [ - { "name": "elem", "type": "t" } - ], - "outputs": [ - { "type": "string" } - ] + "name": "aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor" }, { - "name": "aten::stride(Tensor self) -> int[]", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "int64[]" } - ] + "name": "aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::stride.Dimname(Tensor self, Dimname dim) -> int", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor" }, { - "name": "aten::stride.int(Tensor self, int dim) -> int", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::sub.complex(complex a, complex b) -> complex" }, { - "name": "aten::strip(str self, str chars=' \\\\n\\\\t\\\\f\\\\v') -> str", - "inputs": [ - { "name": "self", "type": "string" }, - { "name": "chars", "type": "string", "default": " \\\\n\\\\t\\\\f\\\\v" } - ], - "outputs": [ - { "type": "string" } - ] + "name": "aten::sub.complex_float(complex a, float b) -> complex" }, { - "name": "aten::sub(Scalar a, Scalar b) -> Scalar", - "inputs": [ - { "name": "a", "type": "Scalar" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "type": "Scalar" } - ] + "name": "aten::sub.complex_int(complex a, int b) -> complex" }, { - "name": "aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" }, - { "name": "alpha", "type": "Scalar", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sub.float(float a, float b) -> float" }, { - "name": "aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" }, - { "name": "alpha", "type": "Scalar", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sub.float_complex(float a, complex b) -> complex" }, { - "name": "aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sub.float_int(float a, int b) -> float" }, { - "name": "aten::sub.complex(complex a, complex b) -> complex", - "inputs": [ - { "name": "a", "type": "complex" }, - { "name": "b", "type": "complex" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::sub.int(int a, int b) -> int" }, { - "name": "aten::sub.complex_float(complex a, float b) -> complex", - "inputs": [ - { "name": "a", "type": "complex" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::sub.int_complex(int a, complex b) -> complex" }, { - "name": "aten::sub.complex_int(complex a, int b) -> complex", - "inputs": [ - { "name": "a", "type": "complex" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::sub.int_float(int a, float b) -> float" }, { - "name": "aten::sub.float(float a, float b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::sub.float_complex(float a, complex b) -> complex", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "complex" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)" }, { - "name": "aten::sub.float_int(float a, int b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)" }, { - "name": "aten::sub.int(int a, int b) -> int", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor" }, { - "name": "aten::sub.int_complex(int a, complex b) -> complex", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "complex" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::sum.DimnameList_out(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::sub.int_float(int a, float b) -> float", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sum.bool(bool[] self) -> int" }, { - "name": "aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" }, - { "name": "alpha", "type": "Scalar", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sum.complex(complex[] self) -> complex" }, { - "name": "aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "alpha", "type": "Scalar", "default": 1, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sum.dim_DimnameList(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor" }, { - "name": "aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor" }, { - "name": "aten::sum.DimnameList_out(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "string[1]" }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sum.float(float[] self) -> float" }, { - "name": "aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?" }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sum.int(int[] self) -> int" }, { - "name": "aten::sum.bool(bool[] self) -> int", - "inputs": [ - { "name": "self", "type": "boolean[]" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::sum.complex(complex[] self) -> complex", - "inputs": [ - { "name": "self", "type": "complex[]" } - ], - "outputs": [ - { "type": "complex" } - ] + "name": "aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)" }, { - "name": "aten::sum.dim_DimnameList(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "string[1]" }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)" }, { - "name": "aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? 
dtype=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?" }, - { "name": "keepdim", "type": "boolean", "default": false }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::sym_size(Tensor self) -> SymInt[]" }, { - "name": "aten::sum.float(float[] self) -> float", - "inputs": [ - { "name": "self", "type": "float32[]" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "aten::sym_size.int(Tensor self, int dim) -> SymInt" }, { - "name": "aten::sum.int(int[] self) -> int", - "inputs": [ - { "name": "self", "type": "int64[]" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "aten::t(Tensor(a) self) -> Tensor(a)" }, { - "name": "aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::take(Tensor self, Tensor index) -> Tensor", + "category": "Activation" }, { - "name": "aten::swapaxes(Tensor(a) self, int axis0, int axis1) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "axis0", "type": "int64" }, - { "name": "axis1", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::swapaxes_(Tensor(a!) self, int axis0, int axis1) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "axis0", "type": "int64" }, - { "name": "axis1", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::take_along_dim(Tensor self, Tensor indices, int? dim=None) -> Tensor" }, { - "name": "aten::sym_size(Tensor self) -> SymInt[]", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "SymInt[]" } - ] + "name": "aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::sym_size.int(Tensor self, int dim) -> SymInt", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" } - ], - "outputs": [ - { "type": "SymInt" } - ] + "name": "aten::tan(Tensor self) -> Tensor" }, { - "name": "aten::t(Tensor(a) self) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tan.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::take(Tensor self, Tensor index) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "index", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tan.complex(complex a) -> complex" }, { - "name": "aten::take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "index", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tan.float(float a) -> float" }, { - "name": "aten::take_along_dim(Tensor self, Tensor indices, int? 
dim=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" }, - { "name": "dim", "type": "int64?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tan.int(int a) -> float" }, { - "name": "aten::take_along_dim.out(Tensor self, Tensor indices, int? dim=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" }, - { "name": "dim", "type": "int64?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::tan(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tan_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::tan.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tanh(Tensor self) -> Tensor" }, { - "name": "aten::tan_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tanh.Scalar(Scalar a) -> Scalar" }, { - "name": "aten::tanh(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tanh.complex(complex a) -> complex" }, { - "name": "aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tanh.float(float a) -> float" + }, + { + "name": "aten::tanh.int(int a) -> float" + }, + { + "name": "aten::tanh.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::tanh_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tanh_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::tensor(t[] data, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor", - "inputs": [ - { "name": "data", "type": "t[]" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "requires_grad", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tensor(t[] data, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor" }, { - "name": "aten::tensor.bool(bool t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor", - "inputs": [ - { "name": "t", "type": "boolean" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "requires_grad", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tensor.bool(bool t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor" }, { - "name": "aten::tensor.complex(complex t, *, ScalarType? dtype=None, Device? 
device=None, bool requires_grad=False) -> Tensor", - "inputs": [ - { "name": "t", "type": "complex" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "requires_grad", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tensor.complex(complex t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor" }, { - "name": "aten::tensor.float(float t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor", - "inputs": [ - { "name": "t", "type": "float32" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "requires_grad", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tensor.float(float t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor" }, { - "name": "aten::tensor.int(int t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor", - "inputs": [ - { "name": "t", "type": "int64" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "requires_grad", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tensor.int(int t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor" }, { - "name": "aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "indices", "type": "SymInt[]" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]" }, { - "name": "aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "sections", "type": "SymInt" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]" }, { - "name": "aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "tensor_indices_or_sections", "type": "Tensor" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]" }, { - "name": "aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "dims_self", "type": "int64[]" }, - { "name": "dims_other", "type": "int64[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor" }, { - "name": "aten::tensordot.out(Tensor self, Tensor other, 
int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "dims_self", "type": "int64[]" }, - { "name": "dims_other", "type": "int64[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::threshold(Tensor self, Scalar threshold, Scalar value) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "threshold", "type": "Scalar" }, - { "name": "value", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" }, { - "name": "aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "threshold", "type": "Scalar" }, - { "name": "value", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::threshold.out(Tensor self, Scalar threshold, Scalar value, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::threshold_(Tensor(a!) self, Scalar threshold, Scalar value) -> Tensor(a!)", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "threshold", "type": "Scalar" }, - { "name": "value", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" }, { - "name": "aten::tile(Tensor self, SymInt[] dims) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dims", "type": "SymInt[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tile(Tensor self, SymInt[] dims) -> Tensor" }, { - "name": "aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "device", "type": "Device" }, - { "name": "dtype", "type": "ScalarType" }, - { "name": "non_blocking", "type": "boolean", "default": false }, - { "name": "copy", "type": "boolean", "default": false }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to.device(Tensor(a) self, Device device, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)" }, { - "name": "aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType" }, - { "name": "non_blocking", "type": "boolean", "default": false }, - { "name": "copy", "type": "boolean", "default": false }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to.dtype(Tensor(a) self, ScalarType dtype, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)" }, { - "name": "aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? 
memory_format=None) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true }, - { "name": "non_blocking", "type": "boolean", "default": false, "kwarg_only": true }, - { "name": "copy", "type": "boolean", "default": false, "kwarg_only": true }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to.dtype_layout(Tensor(a) self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)" }, { - "name": "aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" }, - { "name": "non_blocking", "type": "boolean", "default": false }, - { "name": "copy", "type": "boolean", "default": false }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to.other(Tensor(a) self, Tensor other, bool non_blocking=False, bool copy=False, MemoryFormat? memory_format=None) -> Tensor(a)" }, { - "name": "aten::to.prim_Device(Tensor(a) self, Device? device, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "device", "type": "Device?" }, - { "name": "dtype", "type": "int64?", "default": null }, - { "name": "non_blocking", "type": "boolean", "default": false }, - { "name": "copy", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to.prim_Device(Tensor(a) self, Device? device, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)" }, { - "name": "aten::to.prim_dtype(Tensor(a) self, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "int64?", "default": null }, - { "name": "non_blocking", "type": "boolean", "default": false }, - { "name": "copy", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to.prim_dtype(Tensor(a) self, int? dtype=None, bool non_blocking=False, bool copy=False) -> Tensor(a|b)" }, { - "name": "aten::to.prim_other(Tensor(a) self, bool non_blocking=False, bool copy=False) -> Tensor(a|b)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "non_blocking", "type": "boolean", "default": false }, - { "name": "copy", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to.prim_other(Tensor(a) self, bool non_blocking=False, bool copy=False) -> Tensor(a|b)" }, { - "name": "aten::to_dense(Tensor self, ScalarType? dtype=None, *, bool? 
masked_grad=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType?", "default": null }, - { "name": "masked_grad", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to_dense(Tensor self, ScalarType? dtype=None, *, bool? masked_grad=None) -> Tensor" }, { - "name": "aten::to_dense_backward(Tensor grad, Tensor input, bool? masked_grad=None) -> Tensor", - "inputs": [ - { "name": "grad", "type": "Tensor" }, - { "name": "input", "type": "Tensor" }, - { "name": "masked_grad", "type": "boolean?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to_dense_backward(Tensor grad, Tensor input, bool? masked_grad=None) -> Tensor" }, { - "name": "aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to_mkldnn(Tensor self, ScalarType? dtype=None) -> Tensor" }, { - "name": "aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor", - "inputs": [ - { "name": "grad", "type": "Tensor" }, - { "name": "input", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to_mkldnn.out(Tensor self, ScalarType? dtype=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "padding", "type": "float32" }, - { "name": "output_size", "type": "SymInt[]?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to_mkldnn_backward(Tensor grad, Tensor input) -> Tensor" }, { - "name": "aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "blocksize", "type": "int64[2]?", "default": null, "kwarg_only": true }, - { "name": "dense_dim", "type": "int64?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to_padded_tensor(Tensor self, float padding, SymInt[]? output_size=None) -> Tensor" }, { - "name": "aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "sparse_dim", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to_padded_tensor.out(Tensor self, float padding, SymInt[]? output_size=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "blocksize", "type": "int64[2]" }, - { "name": "dense_dim", "type": "int64?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to_sparse(Tensor self, *, Layout? layout=None, int[2]? blocksize=None, int? dense_dim=None) -> Tensor" }, { - "name": "aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? 
dense_dim=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "blocksize", "type": "int64[2]" }, - { "name": "dense_dim", "type": "int64?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to_sparse.sparse_dim(Tensor self, int sparse_dim) -> Tensor" }, { - "name": "aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dense_dim", "type": "int64?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to_sparse_bsc(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor" }, { - "name": "aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dense_dim", "type": "int64?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::to_sparse_bsr(Tensor self, int[2] blocksize, int? dense_dim=None) -> Tensor" }, { - "name": "aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "k", "type": "SymInt" }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "largest", "type": "boolean", "default": true }, - { "name": "sorted", "type": "boolean", "default": true } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::to_sparse_csc(Tensor self, int? dense_dim=None) -> Tensor" }, { - "name": "aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "k", "type": "SymInt" }, - { "name": "dim", "type": "int64", "default": -1 }, - { "name": "largest", "type": "boolean", "default": true }, - { "name": "sorted", "type": "boolean", "default": true } - ], - "outputs": [ - { "name": "values", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" } - ] + "name": "aten::to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor" }, { - "name": "aten::transpose.Dimname(Tensor(a) self, Dimname dim0, Dimname dim1) -> Tensor(a)", - "category": "Transform", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim0", "type": "Dimname" }, - { "name": "dim1", "type": "Dimname" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::topk(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True) -> (Tensor values, Tensor indices)" + }, + { + "name": "aten::topk.values(Tensor self, SymInt k, int dim=-1, bool largest=True, bool sorted=True, *, Tensor(a!) values, Tensor(b!) indices) -> (Tensor(a!) values, Tensor(b!) indices)" + }, + { + "name": "aten::transpose.Dimname(Tensor(a) self, str dim0, str dim1) -> Tensor(a)", + "category": "Transform" }, { "name": "aten::transpose.int(Tensor(a) self, int dim0, int dim1) -> Tensor(a)", - "category": "Transform", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim0", "type": "int64" }, - { "name": "dim1", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Transform" }, { "name": "aten::transpose_(Tensor(a!) 
self, int dim0, int dim1) -> Tensor(a!)", - "category": "Transform", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim0", "type": "int64" }, - { "name": "dim1", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Transform" }, { - "name": "aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim0", "type": "int64" }, - { "name": "dim1", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::transpose_copy.int(Tensor self, int dim0, int dim1) -> Tensor" + }, + { + "name": "aten::transpose_copy.int_out(Tensor self, int dim0, int dim1, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::tril(Tensor self, int diagonal=0) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "diagonal", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "diagonal", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tril.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "diagonal", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tril_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)" }, { - "name": "aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "row", "type": "int64" }, - { "name": "col", "type": "int64" }, - { "name": "offset", "type": "int64", "default": 0 }, - { "name": "dtype", "type": "ScalarType?", "default": "long", "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tril_indices(int row, int col, int offset=0, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", + "category": "Layer" }, { - "name": "aten::triu(Tensor self, int diagonal=0) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "diagonal", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::tril_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "diagonal", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::triu(Tensor self, int diagonal=0) -> Tensor" }, { - "name": "aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "diagonal", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::triu.out(Tensor self, int diagonal=0, *, Tensor(a!) 
out) -> Tensor(a!)" }, { - "name": "aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=long, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "row", "type": "int64" }, - { "name": "col", "type": "int64" }, - { "name": "offset", "type": "int64", "default": 0 }, - { "name": "dtype", "type": "ScalarType?", "default": "long", "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::triu_(Tensor(a!) self, int diagonal=0) -> Tensor(a!)" }, { - "name": "aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::triu_indices(int row, int col, int offset=0, *, ScalarType? dtype=4, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::triu_indices.out(int row, int col, int offset=0, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::true_divide.Scalar(Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::true_divide.Tensor(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::true_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::true_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::type_as(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::true_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)" }, { - "name": "aten::unbind.Dimname(Tensor(a -> *) self, Dimname dim) -> Tensor(a)[]", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::true_divide_.Tensor(Tensor(a!) 
self, Tensor other) -> Tensor(a!)" }, { - "name": "aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::type_as(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::unflatten.Dimname(Tensor(a) self, Dimname dim, SymInt[] sizes, Dimname[] names) -> Tensor(a)", - "category": "Shape", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname" }, - { "name": "sizes", "type": "SymInt[]" }, - { "name": "names", "type": "Dimname[]", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::unbind.Dimname(Tensor(a -> *) self, str dim) -> Tensor(a)[]" + }, + { + "name": "aten::unbind.int(Tensor(a -> *) self, int dim=0) -> Tensor(a)[]" + }, + { + "name": "aten::unflatten.Dimname(Tensor(a) self, str dim, SymInt[] sizes, str[] names) -> Tensor(a)", + "category": "Shape" + }, + { + "name": "aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a)", + "category": "Shape" + }, + { + "name": "aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)" }, { - "name": "aten::unflatten.int(Tensor(a) self, int dim, SymInt[] sizes) -> Tensor(a)", - "category": "Shape", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "sizes", "type": "SymInt[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::uniform_(Tensor(a!) self, float from=0., float to=1., *, Generator? generator=None) -> Tensor(a!)" }, { - "name": "aten::unfold(Tensor(a) self, int dimension, int size, int step) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dimension", "type": "int64" }, - { "name": "size", "type": "int64" }, - { "name": "step", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None) -> (Tensor, Tensor, Tensor)", + "category": "Layer" }, { - "name": "aten::uniform_(Tensor(a!) self, float from=0, float to=1, *, Generator? generator=None) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "from", "type": "float32", "default": 0 }, - { "name": "to", "type": "float32", "default": 1 }, - { "name": "generator", "type": "Generator?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::unique_consecutive.out(Tensor self, bool return_inverse=False, bool return_counts=False, int? dim=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))" }, { - "name": "aten::unique_consecutive(Tensor self, bool return_inverse=False, bool return_counts=False, int? 
dim=None) -> (Tensor, Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "return_inverse", "type": "boolean", "default": false }, - { "name": "return_counts", "type": "boolean", "default": false }, - { "name": "dim", "type": "int64?", "default": null } - ], - "outputs": [ - { "name": "output1", "type": "Tensor" }, - { "name": "output2", "type": "Tensor" }, - { "name": "output3", "type": "Tensor" } - ] + "name": "aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)" }, { - "name": "aten::unique_dim(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "sorted", "type": "boolean", "default": true }, - { "name": "return_inverse", "type": "boolean", "default": false }, - { "name": "return_counts", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::unique_dim.out(Tensor self, int dim, bool sorted=True, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))" }, { "name": "aten::unique_dim_consecutive(Tensor self, int dim, bool return_inverse=False, bool return_counts=False) -> (Tensor, Tensor, Tensor)", - "category": "Layer", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "return_inverse", "type": "boolean", "default": false }, - { "name": "return_counts", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "output1", "type": "Tensor" }, - { "name": "output2", "type": "Tensor" }, - { "name": "output3", "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "chunks", "type": "int64" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::unique_dim_consecutive.out(Tensor self, int dim, bool return_inverse=False, bool return_counts=False, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))" + }, + { + "name": "aten::unsafe_chunk(Tensor self, int chunks, int dim=0) -> Tensor[]" }, { "name": "aten::unsafe_split.Tensor(Tensor self, SymInt split_size, int dim=0) -> Tensor[]", - "category": "Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "split_size", "type": "SymInt" }, - { "name": "dim", "type": "int64", "default": 0 } - ], - "outputs": [ - { "name": "outputs", "type": "Tensor[]" } - ] + "category": "Tensor" + }, + { + "name": "aten::unsafe_split.Tensor_out(Tensor self, SymInt split_size, int dim=0, *, Tensor(a!)[] out) -> ()" }, { "name": "aten::unsqueeze(Tensor(a) self, int dim) -> Tensor(a)", - "category": "Transform", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Transform" }, { "name": "aten::unsqueeze_(Tensor(a!) 
self, int dim) -> Tensor(a!)", - "category": "Transform", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Transform" }, { - "name": "aten::unsqueeze_copy(Tensor self, int dim) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::unsqueeze_copy(Tensor self, int dim) -> Tensor" + }, + { + "name": "aten::unsqueeze_copy.out(Tensor self, int dim, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::upsample_bicubic2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[2]" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scales_h", "type": "float32?", "default": null }, - { "name": "scales_w", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[2]" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scales_h", "type": "float32?", "default": null }, - { "name": "scales_w", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_bicubic2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::upsample_bicubic2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[]?" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scale_factors", "type": "float32[]?" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { "name": "aten::upsample_bilinear2d(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[2]" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scales_h", "type": "float32?", "default": null }, - { "name": "scales_w", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[2]" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scales_h", "type": "float32?", "default": null }, - { "name": "scales_w", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_bilinear2d.out(Tensor self, SymInt[2] output_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::upsample_bilinear2d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[]?" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scale_factors", "type": "float32[]?" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[2]" }, - { "name": "input_size", "type": "SymInt[4]" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scales_h", "type": "float32?", "default": null }, - { "name": "scales_w", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_bilinear2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None) -> Tensor" }, { - "name": "aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[2]" }, - { "name": "input_size", "type": "SymInt[4]" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scales_h", "type": "float32?", "default": null }, - { "name": "scales_w", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_bilinear2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, bool align_corners, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)" }, { - "name": "aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[1]" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scales", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_linear1d(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None) -> Tensor" }, { - "name": "aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[1]" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scales", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_linear1d.out(Tensor self, SymInt[1] output_size, bool align_corners, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[]?" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scale_factors", "type": "float32[]?" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_linear1d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? 
scale_factors) -> Tensor" }, { "name": "aten::upsample_nearest1d(Tensor self, SymInt[1] output_size, float? scales=None) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[1]" }, - { "name": "scales", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[1]" }, - { "name": "scales", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_nearest1d.out(Tensor self, SymInt[1] output_size, float? scales=None, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::upsample_nearest1d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[]?" }, - { "name": "scale_factors", "type": "float32[]?" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { "name": "aten::upsample_nearest2d(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[2]" }, - { "name": "scales_h", "type": "float32?", "default": null }, - { "name": "scales_w", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[2]" }, - { "name": "scales_h", "type": "float32?", "default": null }, - { "name": "scales_w", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_nearest2d.out(Tensor self, SymInt[2] output_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::upsample_nearest2d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[]?" }, - { "name": "scale_factors", "type": "float32[]?" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[2]" }, - { "name": "input_size", "type": "SymInt[4]" }, - { "name": "scales_h", "type": "float32?", "default": null }, - { "name": "scales_w", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_nearest2d_backward(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None) -> Tensor" }, { - "name": "aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) 
grad_input) -> Tensor(a!)", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[2]" }, - { "name": "input_size", "type": "SymInt[4]" }, - { "name": "scales_h", "type": "float32?", "default": null }, - { "name": "scales_w", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_nearest2d_backward.grad_input(Tensor grad_output, SymInt[2] output_size, SymInt[4] input_size, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)" }, { "name": "aten::upsample_nearest3d(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[3]" }, - { "name": "scales_d", "type": "float32?", "default": null }, - { "name": "scales_h", "type": "float32?", "default": null }, - { "name": "scales_w", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[3]" }, - { "name": "scales_d", "type": "float32?", "default": null }, - { "name": "scales_h", "type": "float32?", "default": null }, - { "name": "scales_w", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_nearest3d.out(Tensor self, SymInt[3] output_size, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)" }, { "name": "aten::upsample_nearest3d.vec(Tensor input, SymInt[]? output_size, float[]? scale_factors) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[]?" }, - { "name": "scale_factors", "type": "float32[]?" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[3]" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scales_d", "type": "float32?", "default": null }, - { "name": "scales_h", "type": "float32?", "default": null }, - { "name": "scales_w", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_trilinear3d(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor" }, { - "name": "aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[3]" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scales_d", "type": "float32?", "default": null }, - { "name": "scales_h", "type": "float32?", "default": null }, - { "name": "scales_w", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_trilinear3d.out(Tensor self, SymInt[3] output_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[]?" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scale_factors", "type": "float32[]?" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_trilinear3d.vec(Tensor input, SymInt[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor" }, { - "name": "aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[3]" }, - { "name": "input_size", "type": "SymInt[5]" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scales_d", "type": "float32?", "default": null }, - { "name": "scales_h", "type": "float32?", "default": null }, - { "name": "scales_w", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_trilinear3d_backward(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None) -> Tensor" }, { - "name": "aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) grad_input) -> Tensor(a!)", - "inputs": [ - { "name": "grad_output", "type": "Tensor" }, - { "name": "output_size", "type": "SymInt[3]" }, - { "name": "input_size", "type": "SymInt[5]" }, - { "name": "align_corners", "type": "boolean" }, - { "name": "scales_d", "type": "float32?", "default": null }, - { "name": "scales_h", "type": "float32?", "default": null }, - { "name": "scales_w", "type": "float32?", "default": null } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::upsample_trilinear3d_backward.grad_input(Tensor grad_output, SymInt[3] output_size, SymInt[5] input_size, bool align_corners, float? scales_d=None, float? scales_h=None, float? scales_w=None, *, Tensor(a!) 
grad_input) -> Tensor(a!)" }, { - "name": "aten::values(Tensor(a) self) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::values(Tensor(a) self) -> Tensor(a)" }, { - "name": "aten::values.Tensor(Dict(Tensor, t) self) -> t[](*)", - "inputs": [ - { "name": "self", "type": "Dict(Tensor, t)" } - ], - "outputs": [ - { "type": "t[]" } - ] + "name": "aten::values.Tensor(Dict(Tensor, t) self) -> t[](*)" }, { - "name": "aten::values.bool(Dict(bool, t) self) -> t[](*)", - "inputs": [ - { "name": "self", "type": "Dict(boolean, t)" } - ], - "outputs": [ - { "type": "t[]" } - ] + "name": "aten::values.bool(Dict(bool, t) self) -> t[](*)" }, { - "name": "aten::values.complex(Dict(complex, t) self) -> t[](*)", - "inputs": [ - { "name": "self", "type": "Dict(complex, t)" } - ], - "outputs": [ - { "type": "t[]" } - ] + "name": "aten::values.complex(Dict(complex, t) self) -> t[](*)" }, { - "name": "aten::values.float(Dict(float, t) self) -> t[](*)", - "inputs": [ - { "name": "self", "type": "Dict(float32, t)" } - ], - "outputs": [ - { "type": "t[]" } - ] + "name": "aten::values.float(Dict(float, t) self) -> t[](*)" }, { - "name": "aten::values.int(Dict(int, t) self) -> t[](*)", - "inputs": [ - { "name": "self", "type": "Dict(int64, t)" } - ], - "outputs": [ - { "type": "t[]" } - ] + "name": "aten::values.int(Dict(int, t) self) -> t[](*)" }, { - "name": "aten::values.str(Dict(str, t) self) -> t[](*)", - "inputs": [ - { "name": "self", "type": "Dict(string, t)" } - ], - "outputs": [ - { "type": "t[]" } - ] + "name": "aten::values.str(Dict(str, t) self) -> t[](*)" }, { - "name": "aten::var(Tensor self, bool unbiased=True) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "unbiased", "type": "boolean", "default": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::var(Tensor self, bool unbiased=True) -> Tensor" }, { - "name": "aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "correction", "type": "Scalar?", "default": null, "kwarg_only": true }, - { "name": "keepdim", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::var.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> Tensor" }, { - "name": "aten::var.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "correction", "type": "Scalar?", "default": null, "kwarg_only": true }, - { "name": "keepdim", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::var.correction_names(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False) -> Tensor" }, { - "name": "aten::var.correction_names_out(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "correction", "type": "Scalar?", "default": null, "kwarg_only": true }, - { "name": "keepdim", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::var.correction_names_out(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "correction", "type": "Scalar?", "default": null, "kwarg_only": true }, - { "name": "keepdim", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::var.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?" }, - { "name": "unbiased", "type": "boolean", "default": true }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::var.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> Tensor" }, { - "name": "aten::var.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "unbiased", "type": "boolean", "default": true }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::var.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> Tensor" + }, + { + "name": "aten::var.names_out(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)" + }, + { + "name": "aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)" + }, + { + "name": "aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)" + }, + { + "name": "aten::var_mean.correction_names(Tensor self, str[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)" + }, + { + "name": "aten::var_mean.correction_out(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))" }, { - "name": "aten::var.names_out(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "unbiased", "type": "boolean", "default": true }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)" }, { - "name": "aten::var.out(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?" }, - { "name": "unbiased", "type": "boolean", "default": true }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::var_mean.names_dim(Tensor self, str[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)" }, { - "name": "aten::var_mean(Tensor self, bool unbiased=True) -> (Tensor, Tensor)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "unbiased", "type": "boolean", "default": true } - ], - "outputs": [ - { "type": "Tensor" }, - { "name": "?", "type": "Tensor" } - ] + "name": "aten::vdot(Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::var_mean.correction(Tensor self, int[1]? dim=None, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?", "default": null }, - { "name": "correction", "type": "Scalar?", "default": null, "kwarg_only": true }, - { "name": "keepdim", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::var_mean.correction_names(Tensor self, Dimname[1] dim, *, Scalar? correction=None, bool keepdim=False) -> (Tensor, Tensor)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "correction", "type": "Scalar?", "default": null, "kwarg_only": true }, - { "name": "keepdim", "type": "boolean", "default": false, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)" }, { - "name": "aten::var_mean.dim(Tensor self, int[1]? dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "int64[1]?" }, - { "name": "unbiased", "type": "boolean", "default": true }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" }, - { "name": "?", "type": "Tensor" } - ] + "name": "aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)" }, { - "name": "aten::var_mean.names_dim(Tensor self, Dimname[1] dim, bool unbiased=True, bool keepdim=False) -> (Tensor, Tensor)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dim", "type": "Dimname[1]" }, - { "name": "unbiased", "type": "boolean", "default": true }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)" }, { - "name": "aten::vdot(Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::view_as_complex(Tensor(a) self) -> Tensor(a)" }, { - "name": "aten::vdot.out(Tensor self, Tensor other, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::view_as_complex_copy(Tensor self) -> Tensor" }, { - "name": "aten::view(Tensor(a) self, SymInt[] size) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "size", "type": "SymInt[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::view_as_complex_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::view.dtype(Tensor(a) self, ScalarType dtype) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::view_as_real(Tensor(a) self) -> Tensor(a)" }, { - "name": "aten::view_as(Tensor(a) self, Tensor other) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::view_as_real_copy(Tensor self) -> Tensor" }, { - "name": "aten::view_as_complex(Tensor(a) self) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::view_as_real_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::view_as_complex_copy(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::view_copy(Tensor self, SymInt[] size) -> Tensor" }, { - "name": "aten::view_as_real(Tensor(a) self) -> Tensor(a)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor" }, { - "name": "aten::view_as_real_copy(Tensor self) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::view_copy.dtype_out(Tensor self, ScalarType dtype, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::view_copy(Tensor self, SymInt[] size) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "size", "type": "SymInt[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::view_copy.out(Tensor self, SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::view_copy.dtype(Tensor self, ScalarType dtype) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::vstack(Tensor[] tensors) -> Tensor" }, { - "name": "aten::vstack(Tensor[] tensors) -> Tensor", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::vstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::vstack.out(Tensor[] tensors, *, Tensor(a!) 
out) -> Tensor(a!)", - "inputs": [ - { "name": "tensors", "type": "Tensor[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::wait(Future(t) self) -> t" }, { - "name": "aten::wait(Future(t) self) -> t", - "inputs": [ - { "name": "self", "type": "Future(t)" } - ], - "outputs": [ - { "type": "t" } - ] + "name": "aten::warn(str message, int stacklevel=2) -> ()" }, { - "name": "aten::warn(str message, int stacklevel=2) -> ()", - "inputs": [ - { "name": "message", "type": "string" }, - { "name": "stacklevel", "type": "int64", "default": 2 } - ], - "outputs": [] + "name": "aten::where(Tensor condition) -> Tensor[]" }, { - "name": "aten::where(Tensor condition) -> Tensor[]", - "inputs": [ - { "name": "condition", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor[]" } - ] + "name": "aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor" }, { - "name": "aten::where.Scalar(Tensor condition, Scalar self, Scalar other) -> Tensor", - "inputs": [ - { "name": "condition", "type": "Tensor" }, - { "name": "self", "type": "Scalar" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor" }, { - "name": "aten::where.ScalarOther(Tensor condition, Tensor self, Scalar other) -> Tensor", - "inputs": [ - { "name": "condition", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor" }, { - "name": "aten::where.ScalarSelf(Tensor condition, Scalar self, Tensor other) -> Tensor", - "inputs": [ - { "name": "condition", "type": "Tensor" }, - { "name": "self", "type": "Scalar" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor" }, { - "name": "aten::where.self(Tensor condition, Tensor self, Tensor other) -> Tensor", - "inputs": [ - { "name": "condition", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::where.self_out(Tensor condition, Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "condition", "type": "Tensor" }, - { "name": "self", "type": "Tensor" }, - { "name": "other", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::zero_(Tensor(a!) self) -> Tensor(a!)" }, { - "name": "aten::zero_(Tensor(a!) self) -> Tensor(a!)", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::zeros(SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=None) -> Tensor", - "inputs": [ - { "name": "size", "type": "SymInt[]" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::zeros.names(int[] size, *, str[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor" }, { - "name": "aten::zeros.names(int[] size, *, Dimname[]? names, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor", - "inputs": [ - { "name": "size", "type": "int64[]" }, - { "name": "names", "type": "Dimname[]?", "kwarg_only": true }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::zeros.names_out(int[] size, *, str[]? names, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)", - "inputs": [ - { "name": "size", "type": "SymInt[]" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::zeros.out(SymInt[] size, *, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "dtype", "type": "ScalarType?", "default": null, "kwarg_only": true }, - { "name": "layout", "type": "Layout?", "default": null, "kwarg_only": true }, - { "name": "device", "type": "Device?", "default": null, "kwarg_only": true }, - { "name": "pin_memory", "type": "boolean?", "default": null, "kwarg_only": true }, - { "name": "memory_format", "type": "MemoryFormat?", "default": null, "kwarg_only": true } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "aten::zeros_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor" }, { - "name": "neuron::forward_v2_1(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0)", - "inputs": [ - { "name": "_0", "type": "Tensor[]" }, - { "name": "_1", "type": "__torch__.torch.classes.neuron.Model" } - ], - "outputs": [ - { "name": "_0", "type": "Tensor" } - ] + "name": "aten::zeros_like.out(Tensor self, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)" }, { - "name": "prepacked::conv2d_clamp_prepack(Tensor W, Tensor? B, int[2] stride, int[2] padding, int[2] dilation, int groups, Scalar? output_min=None, Scalar? output_max=None) -> __torch__.torch.classes.xnnpack.Conv2dOpContext", - "inputs": [ - { "name": "W", "type": "Tensor" }, - { "name": "B", "type": "Tensor?" 
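
The entries above keep only the TorchScript schema string; the structured "inputs"/"outputs" arrays this diff deletes are recoverable from that string. A minimal sketch of such a parser in JavaScript follows — the names parseSchema, parseArg, splitTopLevel and matchParen are illustrative, not the project's actual API:

    // Sketch only: recovers the removed "inputs"/"outputs" metadata
    // from a TorchScript schema string. Not the project's real parser.
    const matchParen = (s, open) => {
        // index of the ')' matching the '(' at position `open`
        let depth = 0;
        for (let i = open; i < s.length; i++) {
            if (s[i] === '(') depth++;
            else if (s[i] === ')' && --depth === 0) return i;
        }
        return -1;
    };
    const splitTopLevel = (s) => {
        // split on commas that are not nested inside () or [],
        // so 'Dict(str, t) self' stays one argument
        const parts = [];
        let depth = 0;
        let current = '';
        for (const c of s) {
            if (c === '(' || c === '[') depth++;
            else if (c === ')' || c === ']') depth--;
            if (c === ',' && depth === 0) {
                parts.push(current.trim());
                current = '';
            } else {
                current += c;
            }
        }
        if (current.trim()) {
            parts.push(current.trim());
        }
        return parts;
    };
    const parseArg = (decl) => {
        // 'int dim=-1' -> { type: 'int', name: 'dim', default: '-1' }
        const eq = decl.indexOf('=');
        const def = eq === -1 ? undefined : decl.slice(eq + 1).trim();
        const head = (eq === -1 ? decl : decl.slice(0, eq)).trim();
        let depth = 0;
        let split = -1; // last space outside ()/[] separates type from name
        for (let i = 0; i < head.length; i++) {
            const c = head[i];
            if (c === '(' || c === '[') depth++;
            else if (c === ')' || c === ']') depth--;
            else if (c === ' ' && depth === 0) split = i;
        }
        return split === -1 ?
            { type: head, default: def } :
            { type: head.slice(0, split), name: head.slice(split + 1), default: def };
    };
    const parseSchema = (schema) => {
        const open = schema.indexOf('(');
        const close = matchParen(schema, open);
        let kwargOnly = false;
        const inputs = [];
        for (const arg of splitTopLevel(schema.slice(open + 1, close))) {
            if (arg === '*') { // everything after '*' is keyword-only
                kwargOnly = true;
                continue;
            }
            const input = parseArg(arg);
            if (kwargOnly) input.kwarg_only = true;
            inputs.push(input);
        }
        let returns = schema.slice(close + 1).replace(/^\s*->\s*/, '').trim();
        if (returns.startsWith('(') && returns.endsWith(')')) {
            returns = returns.slice(1, -1); // unwrap tuple returns
        }
        const outputs = returns ? splitTopLevel(returns).map(parseArg) : [];
        return { name: schema.slice(0, open), inputs, outputs };
    };

Applied to the aten::topk schema above, this reproduces the five inputs (dim defaulting to -1, largest and sorted to True) and the two named tuple outputs that the deleted JSON spelled out. One caveat the sketch glosses over: defaults come back as raw strings, so the dtype=4 in the rewritten tril_indices/triu_indices entries is the numeric c10::ScalarType index for Long — the enum starts uint8, int8, int16, int32, int64, ... — where the old metadata wrote dtype=long.
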
}, - { "name": "stride", "type": "int64[2]" }, - { "name": "padding", "type": "int64[2]" }, - { "name": "dilation", "type": "int64[2]" }, - { "name": "groups", "type": "int64" }, - { "name": "output_min", "type": "Scalar?", "default": null }, - { "name": "output_max", "type": "Scalar?", "default": null } - ], - "outputs": [ - { "type": "__torch__.torch.classes.xnnpack.Conv2dOpContext" } - ] + "name": "neuron::forward_v2_1(Tensor[] _0, __torch__.torch.classes.neuron.Model _1) -> (Tensor _0)" + }, + { + "name": "prepacked::conv2d_clamp_prepack(Tensor W, Tensor? B, int[2] stride, int[2] padding, int[2] dilation, int groups, Scalar? output_min=None, Scalar? output_max=None) -> __torch__.torch.classes.xnnpack.Conv2dOpContext" }, { "name": "prepacked::conv2d_clamp_run(Tensor X, __torch__.torch.classes.xnnpack.Conv2dOpContext W_prepack) -> Tensor Y", - "category": "Layer", - "inputs": [ - { "name": "X", "type": "Tensor" }, - { "name": "W_prepack", "type": "__torch__.torch.classes.xnnpack.Conv2dOpContext" } - ], - "outputs": [ - { "name": "Y", "type": "Tensor" } - ] + "category": "Layer" }, { "name": "prepacked::conv2d_transpose_clamp_run(Tensor X, __torch__.torch.classes.xnnpack.TransposeConv2dOpContext W_prepack) -> Tensor Y", - "category": "Layer", - "inputs": [ - { "name": "X", "type": "Tensor" }, - { "name": "W_prepack", "type": "__torch__.torch.classes.xnnpack.TransposeConv2dOpContext" } - ], - "outputs": [ - { "name": "Y", "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "prepacked::linear_clamp_prepack(Tensor W, Tensor? B=None, Scalar? output_min=None, Scalar? output_max=None) -> __torch__.torch.classes.xnnpack.LinearOpContext", - "inputs": [ - { "name": "W", "type": "Tensor" }, - { "name": "B", "type": "Tensor?", "default": null }, - { "name": "output_min", "type": "Scalar?", "default": null }, - { "name": "output_max", "type": "Scalar?", "default": null } - ], - "outputs": [ - { "type": "__torch__.torch.classes.xnnpack.LinearOpContext" } - ] + "name": "prepacked::linear_clamp_prepack(Tensor W, Tensor? B=None, Scalar? output_min=None, Scalar? output_max=None) -> __torch__.torch.classes.xnnpack.LinearOpContext" }, { "name": "prepacked::linear_clamp_run(Tensor X, __torch__.torch.classes.xnnpack.LinearOpContext W_prepack) -> Tensor Y", - "category": "Layer", - "inputs": [ - { "name": "X", "type": "Tensor" }, - { "name": "W_prepack", "type": "__torch__.torch.classes.xnnpack.LinearOpContext" } - ], - "outputs": [ - { "name": "Y", "type": "Tensor" } - ] - }, - { - "name": "prim::AutogradAdd(Any a, Any b) -> Any", - "inputs": [ - { "name": "a", "type": "Any" }, - { "name": "b", "type": "Any" } - ], - "outputs": [ - { "type": "Any" } - ] + "category": "Layer" }, { - "name": "prim::AutogradAllNonZero(...) -> bool", - "inputs": [], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::AutogradAdd(Any a, Any b) -> Any" }, { - "name": "prim::AutogradAllZero(...) -> bool", - "inputs": [], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::AutogradAllNonZero(...) -> bool" }, { - "name": "prim::AutogradAnyNonZero(...) -> bool", - "inputs": [], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::AutogradAllZero(...) -> bool" }, { - "name": "prim::AutogradZero() -> Tensor", - "inputs": [], - "outputs": [ - { "type": "Tensor" } - ] + "name": "prim::AutogradAnyNonZero(...) -> bool" }, { - "name": "prim::BroadcastSizes(...) 
-> int[]", - "inputs": [], - "outputs": [ - { "type": "int64[]" } - ] + "name": "prim::AutogradZero() -> Tensor" }, { - "name": "prim::EnumName(AnyEnumType enum) -> str", - "inputs": [ - { "name": "enum", "type": "AnyEnumType" } - ], - "outputs": [ - { "type": "string" } - ] + "name": "prim::BroadcastSizes(...) -> int[]" }, { - "name": "prim::EnumValue.float(AnyEnumType enum) -> float", - "inputs": [ - { "name": "enum", "type": "AnyEnumType" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "prim::EnumName(AnyEnumType enum) -> str" }, { - "name": "prim::EnumValue.int(AnyEnumType enum) -> int", - "inputs": [ - { "name": "enum", "type": "AnyEnumType" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "prim::EnumValue.float(AnyEnumType enum) -> float" }, { - "name": "prim::EnumValue.str(AnyEnumType enum) -> str", - "inputs": [ - { "name": "enum", "type": "AnyEnumType" } - ], - "outputs": [ - { "type": "string" } - ] + "name": "prim::EnumValue.int(AnyEnumType enum) -> int" }, { - "name": "prim::IfThenElse(bool cond, Any(a) x, Any(b) y) -> Any(a|b)", - "inputs": [ - { "name": "cond", "type": "boolean" }, - { "name": "x", "type": "Any" }, - { "name": "y", "type": "Any" } - ], - "outputs": [ - { "type": "Any" } - ] + "name": "prim::EnumValue.str(AnyEnumType enum) -> str" }, { - "name": "prim::ModuleContainerIndex.list(Any self, int ind) -> Any", - "inputs": [ - { "name": "self", "type": "Any" }, - { "name": "ind", "type": "int64" } - ], - "outputs": [ - { "type": "Any" } - ] + "name": "prim::IfThenElse(bool cond, Any(a) x, Any(b) y) -> Any(a|b)" }, { - "name": "prim::NumToTensor.Scalar(Scalar a) -> Tensor", - "inputs": [ - { "name": "a", "type": "Scalar" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "prim::ModuleContainerIndex.dict(Any self, str ind) -> Any" }, { - "name": "prim::NumToTensor.bool(bool a) -> Tensor", - "inputs": [ - { "name": "a", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "prim::ModuleContainerIndex.list(Any self, int ind) -> Any" }, { - "name": "prim::Print(...) -> ()", - "inputs": [], - "outputs": [] + "name": "prim::NumToTensor.Scalar(Scalar a) -> Tensor" }, { - "name": "prim::RaiseException(str msg, str? cls=None) -> ()", - "inputs": [ - { "name": "msg", "type": "string" }, - { "name": "cls", "type": "string?", "default": null } - ], - "outputs": [] + "name": "prim::NumToTensor.bool(bool a) -> Tensor" }, { - "name": "prim::ReductionSizes(int[] size, int[] red_axes, bool keepdim = False) -> int[]", - "inputs": [ - { "name": "size", "type": "int64[]" }, - { "name": "red_axes", "type": "int64[]" }, - { "name": "keepdim", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "int64[]" } - ] + "name": "prim::Print(...) -> ()" }, { - "name": "prim::StringIndex(str string, int index) -> str", - "inputs": [ - { "name": "string", "type": "string" }, - { "name": "index", "type": "int64" } - ], - "outputs": [ - { "type": "string" } - ] + "name": "prim::RaiseException(str msg, str? 
cls=None) -> ()" }, { - "name": "prim::TupleIndex(Any tup, int i) -> Any", - "inputs": [ - { "name": "tup", "type": "Any" }, - { "name": "i", "type": "int64" } - ], - "outputs": [ - { "type": "Any" } - ] + "name": "prim::ReductionSizes(int[] size, int[] red_axes, bool keepdim=False) -> int[]" }, { - "name": "prim::TupleUnpack(Any tup) -> ...", - "inputs": [ - { "name": "tup", "type": "Any" } - ], - "outputs": [] + "name": "prim::StringIndex(str string, int index) -> str" }, { - "name": "prim::Uninitialized() -> Any", - "inputs": [], - "outputs": [ - { "type": "Any" } - ] + "name": "prim::TupleIndex(Any tup, int i) -> Any" }, { - "name": "prim::VarConcat(...) -> Tensor", - "inputs": [], - "outputs": [ - { "type": "Tensor" } - ] + "name": "prim::TupleUnpack(Any tup) -> ..." }, { - "name": "prim::VarStack(...) -> Tensor", - "inputs": [], - "outputs": [ - { "type": "Tensor" } - ] + "name": "prim::Uninitialized() -> Any" }, { - "name": "prim::abs(Tensor x) -> Tensor", - "inputs": [ - { "name": "x", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "prim::VarConcat(...) -> Tensor" }, { - "name": "prim::abs.Scalar(Scalar a) -> Scalar", - "inputs": [ - { "name": "a", "type": "Scalar" } - ], - "outputs": [ - { "type": "Scalar" } - ] + "name": "prim::VarStack(...) -> Tensor" }, { - "name": "prim::abs.complex(complex a) -> float", - "inputs": [ - { "name": "a", "type": "complex" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "prim::abs(Tensor x) -> Tensor" }, { - "name": "prim::abs.float(float a) -> float", - "inputs": [ - { "name": "a", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "prim::abs.Scalar(Scalar a) -> Scalar" }, - { - "name": "prim::abs.int(int a) -> int", - "inputs": [ - { "name": "a", "type": "int64" } - ], - "outputs": [ - { "type": "int64" } - ] + { + "name": "prim::abs.complex(complex a) -> float" }, { - "name": "prim::data(Tensor(a) a) -> Tensor(a)", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "prim::abs.float(float a) -> float" }, { - "name": "prim::device(Tensor a) -> Device", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "Device" } - ] + "name": "prim::abs.int(int a) -> int" }, { - "name": "prim::dtype(Tensor a) -> int", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "prim::data(Tensor(a) a) -> Tensor(a)" }, { - "name": "prim::grad(Tensor a) -> Tensor(*)", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "prim::device(Tensor a) -> Device" }, { - "name": "prim::id(AnyClassType? x) -> int", - "inputs": [ - { "name": "x", "type": "AnyClassType?" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "prim::dtype(Tensor a) -> int" }, { - "name": "prim::index(Device self) -> int?", - "inputs": [ - { "name": "self", "type": "Device" } - ], - "outputs": [ - { "type": "int64?" } - ] + "name": "prim::grad(Tensor a) -> Tensor(*)" }, { - "name": "prim::is_cpu(Tensor a) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::id(AnyClassType? x) -> int" }, { - "name": "prim::is_cuda(Tensor a) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::index(Device self) -> int?" 
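
The prim:: intrinsics in this stretch are a reminder that not every schema has a fixed signature: prim::Print(...), prim::BroadcastSizes(...), prim::VarConcat(...) and prim::VarStack(...) are variadic on the input side, and prim::TupleUnpack(Any tup) -> ... is variadic on the return side. A consumer of this file would presumably special-case those rather than feed them through the argument parser sketched earlier; a minimal check (same illustrative, non-authoritative status as the sketch above):

    // variadic inputs: the whole argument list is literally '...'
    const hasVariadicInputs = (schema) => /\(\s*\.\.\.\s*\)/.test(schema);
    // variadic returns: the schema ends in '-> ...'
    const hasVariadicReturns = (schema) => /->\s*\.\.\.\s*$/.test(schema);

    hasVariadicInputs('prim::Print(...) -> ()');             // true
    hasVariadicReturns('prim::TupleUnpack(Any tup) -> ...'); // true
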
}, { - "name": "prim::is_ipu(Tensor a) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::is_cpu(Tensor a) -> bool" }, { - "name": "prim::is_maia(Tensor a) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::is_cuda(Tensor a) -> bool" }, { - "name": "prim::is_meta(Tensor a) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::is_ipu(Tensor a) -> bool" }, { - "name": "prim::is_mkldnn(Tensor a) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::is_maia(Tensor a) -> bool" }, { - "name": "prim::is_mps(Tensor a) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::is_meta(Tensor a) -> bool" }, { - "name": "prim::is_mtia(Tensor a) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::is_mkldnn(Tensor a) -> bool" }, { - "name": "prim::is_nested(Tensor a) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::is_mps(Tensor a) -> bool" }, { - "name": "prim::is_quantized(Tensor a) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::is_mtia(Tensor a) -> bool" }, { - "name": "prim::is_sparse(Tensor a) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::is_nested(Tensor a) -> bool" }, { - "name": "prim::is_sparse_csr(Tensor a) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::is_quantized(Tensor a) -> bool" }, { - "name": "prim::is_vulkan(Tensor a) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::is_sparse(Tensor a) -> bool" }, { - "name": "prim::is_xla(Tensor a) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::is_sparse_csr(Tensor a) -> bool" }, { - "name": "prim::is_xpu(Tensor a) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::is_vulkan(Tensor a) -> bool" }, { - "name": "prim::isinstance(Any to_check) -> bool", - "inputs": [ - { "name": "to_check", "type": "Any" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::is_xla(Tensor a) -> bool" }, { - "name": "prim::itemsize(Tensor a) -> int", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "prim::is_xpu(Tensor a) -> bool" }, { - "name": "prim::layout(Tensor a) -> Layout", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "Layout" } - ] + "name": "prim::isinstance(Any to_check) -> bool" }, { - "name": "prim::max(Scalar a, Scalar b) -> Scalar", - "inputs": [ - { "name": "a", "type": "Scalar" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "type": "Scalar" } - ] + "name": "prim::itemsize(Tensor a) -> int" }, { - "name": "prim::max.bool_list(bool[] l, bool[] r) -> bool[]", - "inputs": [ - { "name": "l", "type": "boolean[]" }, - { "name": "r", "type": "boolean[]" } - ], - "outputs": [ - { "type": 
"boolean[]" } - ] + "name": "prim::layout(Tensor a) -> Layout" }, { - "name": "prim::max.float(float a, float b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "prim::max(Scalar a, Scalar b) -> Scalar" }, { - "name": "prim::max.float_int(float a, int b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "prim::max.bool_list(bool[] l, bool[] r) -> bool[]" }, { - "name": "prim::max.float_list(float[] l, float[] r) -> float[]", - "inputs": [ - { "name": "l", "type": "float32[]" }, - { "name": "r", "type": "float32[]" } - ], - "outputs": [ - { "type": "float32[]" } - ] + "name": "prim::max.float(float a, float b) -> float" }, { - "name": "prim::max.int(int a, int b) -> int", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "prim::max.float_int(float a, int b) -> float" }, { - "name": "prim::max.int_float(int a, float b) -> float", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "prim::max.float_list(float[] l, float[] r) -> float[]" }, { - "name": "prim::max.int_list(int[] l, int[] r) -> int[]", - "inputs": [ - { "name": "l", "type": "int64[]" }, - { "name": "r", "type": "int64[]" } - ], - "outputs": [ - { "type": "int64[]" } - ] + "name": "prim::max.int(int a, int b) -> int" }, { - "name": "prim::max.self_bool(bool[] self) -> bool", - "inputs": [ - { "name": "self", "type": "boolean[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::max.int_float(int a, float b) -> float" }, { - "name": "prim::max.self_float(float[] self) -> float", - "inputs": [ - { "name": "self", "type": "float32[]" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "prim::max.int_list(int[] l, int[] r) -> int[]" }, { - "name": "prim::max.self_int(int[] self) -> int", - "inputs": [ - { "name": "self", "type": "int64[]" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "prim::max.self_bool(bool[] self) -> bool" }, { - "name": "prim::min(Scalar a, Scalar b) -> Scalar", - "inputs": [ - { "name": "a", "type": "Scalar" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "type": "Scalar" } - ] + "name": "prim::max.self_float(float[] self) -> float" }, { - "name": "prim::min.bool_list(bool[] l, bool[] r) -> bool[]", - "inputs": [ - { "name": "l", "type": "boolean[]" }, - { "name": "r", "type": "boolean[]" } - ], - "outputs": [ - { "type": "boolean[]" } - ] + "name": "prim::max.self_int(int[] self) -> int" }, { - "name": "prim::min.float(float a, float b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "prim::min(Scalar a, Scalar b) -> Scalar" }, { - "name": "prim::min.float_int(float a, int b) -> float", - "inputs": [ - { "name": "a", "type": "float32" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "prim::min.bool_list(bool[] l, bool[] r) -> bool[]" }, { - "name": "prim::min.float_list(float[] l, float[] r) -> float[]", - "inputs": [ - { "name": "l", "type": "float32[]" }, - { "name": "r", "type": "float32[]" } - ], - "outputs": [ - { "type": "float32[]" } - ] + "name": "prim::min.float(float a, float b) -> float" }, { - "name": 
"prim::min.int(int a, int b) -> int", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "int64" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "prim::min.float_int(float a, int b) -> float" }, { - "name": "prim::min.int_float(int a, float b) -> float", - "inputs": [ - { "name": "a", "type": "int64" }, - { "name": "b", "type": "float32" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "prim::min.float_list(float[] l, float[] r) -> float[]" }, { - "name": "prim::min.int_list(int[] l, int[] r) -> int[]", - "inputs": [ - { "name": "l", "type": "int64[]" }, - { "name": "r", "type": "int64[]" } - ], - "outputs": [ - { "type": "int64[]" } - ] + "name": "prim::min.int(int a, int b) -> int" }, { - "name": "prim::min.self_bool(bool[] self) -> bool", - "inputs": [ - { "name": "self", "type": "boolean[]" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::min.int_float(int a, float b) -> float" }, { - "name": "prim::min.self_float(float[] self) -> float", - "inputs": [ - { "name": "self", "type": "float32[]" } - ], - "outputs": [ - { "type": "float32" } - ] + "name": "prim::min.int_list(int[] l, int[] r) -> int[]" }, { - "name": "prim::min.self_int(int[] self) -> int", - "inputs": [ - { "name": "self", "type": "int64[]" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "prim::min.self_bool(bool[] self) -> bool" }, { - "name": "prim::name(Tensor a) -> str?", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "string?" } - ] + "name": "prim::min.self_float(float[] self) -> float" }, { - "name": "prim::nbytes(Tensor a) -> int", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "prim::min.self_int(int[] self) -> int" }, { - "name": "prim::rangelist(int n) -> int[]", - "inputs": [ - { "name": "n", "type": "int64" } - ], - "outputs": [ - { "type": "int64[]" } - ] + "name": "prim::name(Tensor a) -> str?" }, { - "name": "prim::requires_grad(Tensor a) -> bool", - "inputs": [ - { "name": "a", "type": "Tensor" } - ], - "outputs": [ - { "type": "boolean" } - ] + "name": "prim::nbytes(Tensor a) -> int" }, { - "name": "prim::shape(Tensor self) -> int[]", - "inputs": [ - { "name": "self", "type": "Tensor" } - ], - "outputs": [ - { "type": "int64[]" } - ] + "name": "prim::rangelist(int n) -> int[]" }, { - "name": "prim::type(Device self) -> str", - "inputs": [ - { "name": "self", "type": "Device" } - ], - "outputs": [ - { "type": "string" } - ] + "name": "prim::requires_grad(Tensor a) -> bool" }, { - "name": "prim::unchecked_cast(t x) -> t", - "inputs": [ - { "name": "x", "type": "t" } - ], - "outputs": [ - { "type": "t" } - ] + "name": "prim::shape(Tensor self) -> int[]" }, { - "name": "prim::unchecked_unwrap_optional(t(a)? optional) -> t(a)", - "inputs": [ - { "name": "optional", "type": "t?" 
} - ], - "outputs": [ - { "type": "t" } - ] + "name": "prim::type(Device self) -> str" }, { - "name": "quantized::add(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "qb", "type": "Tensor" }, - { "name": "scale", "type": "float32" }, - { "name": "zero_point", "type": "int64" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "prim::unchecked_cast(t x) -> t" }, { - "name": "quantized::add.Scalar(Tensor qa, Scalar b) -> Tensor qc", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "prim::unchecked_unwrap_optional(t(a)? optional) -> t(a)" }, { - "name": "quantized::add.Scalar2(Scalar b, Tensor qa) -> Tensor qc", - "inputs": [ - { "name": "b", "type": "Scalar" }, - { "name": "qa", "type": "Tensor" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::add(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc" }, { - "name": "quantized::add.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Scalar" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::add.Scalar(Tensor qa, Scalar b) -> Tensor qc" }, { - "name": "quantized::add.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "qb", "type": "Tensor" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::add.Scalar2(Scalar b, Tensor qa) -> Tensor qc" }, { - "name": "quantized::add_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "qb", "type": "Tensor" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::add.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::add_relu(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "qb", "type": "Tensor" }, - { "name": "scale", "type": "float32" }, - { "name": "zero_point", "type": "int64" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::add.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::add_relu.Scalar(Tensor qa, Scalar b) -> Tensor qc", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::add_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::add_relu.Scalar2(Scalar b, Tensor qa) -> Tensor qc", - "inputs": [ - { "name": "b", "type": "Scalar" }, - { "name": "qa", "type": "Tensor" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::add_relu(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc" + }, + { + "name": "quantized::add_relu.Scalar(Tensor qa, Scalar b) -> Tensor qc" + }, + { + "name": "quantized::add_relu.Scalar2(Scalar b, Tensor qa) -> Tensor qc" }, { - "name": "quantized::add_relu.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) 
out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Scalar" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::add_relu.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::add_relu.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "qb", "type": "Tensor" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::add_relu.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::add_relu_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "qb", "type": "Tensor" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::add_relu_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::add_scalar(Tensor qa, Scalar b) -> Tensor qc", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::add_scalar(Tensor qa, Scalar b) -> Tensor qc" }, { - "name": "quantized::add_scalar.Tensor(Tensor qa, Tensor b) -> Tensor qc", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Tensor" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::add_scalar.Tensor(Tensor qa, Tensor b) -> Tensor qc" }, { - "name": "quantized::add_scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Scalar" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::add_scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::add_scalar_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Tensor" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::add_scalar_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::add_scalar_relu(Tensor qa, Scalar b) -> Tensor qc", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::add_scalar_relu(Tensor qa, Scalar b) -> Tensor qc" }, { - "name": "quantized::add_scalar_relu.Tensor(Tensor qa, Tensor b) -> Tensor qc", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Tensor" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::add_scalar_relu.Tensor(Tensor qa, Tensor b) -> Tensor qc" }, { - "name": "quantized::add_scalar_relu_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Scalar" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::add_scalar_relu_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::add_scalar_relu_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) 
out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Tensor" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::add_scalar_relu_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::batch_norm(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "mean", "type": "Tensor" }, - { "name": "var", "type": "Tensor" }, - { "name": "eps", "type": "float32" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "quantized::batch_norm(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor" }, { "name": "quantized::batch_norm1d(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor", - "category": "Normalization", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "mean", "type": "Tensor" }, - { "name": "var", "type": "Tensor" }, - { "name": "eps", "type": "float32" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Normalization" }, { "name": "quantized::batch_norm1d_relu(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor", - "category": "Normalization", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "mean", "type": "Tensor" }, - { "name": "var", "type": "Tensor" }, - { "name": "eps", "type": "float32" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Normalization" }, { "name": "quantized::batch_norm2d(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor", - "category": "Normalization", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "mean", "type": "Tensor" }, - { "name": "var", "type": "Tensor" }, - { "name": "eps", "type": "float32" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Normalization" }, { "name": "quantized::batch_norm2d_relu(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor", - "category": "Normalization", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?" }, - { "name": "bias", "type": "Tensor?" 
}, - { "name": "mean", "type": "Tensor" }, - { "name": "var", "type": "Tensor" }, - { "name": "eps", "type": "float32" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Normalization" }, { "name": "quantized::batch_norm3d(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor", - "category": "Normalization", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "mean", "type": "Tensor" }, - { "name": "var", "type": "Tensor" }, - { "name": "eps", "type": "float32" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Normalization" }, { "name": "quantized::batch_norm3d_relu(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor", - "category": "Normalization", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "mean", "type": "Tensor" }, - { "name": "var", "type": "Tensor" }, - { "name": "eps", "type": "float32" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Normalization" }, { "name": "quantized::batch_norm_relu(Tensor qx, Tensor? weight, Tensor? bias, Tensor mean, Tensor var, float eps, float output_scale, int output_zero_point) -> Tensor", - "category": "Normalization", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "mean", "type": "Tensor" }, - { "name": "var", "type": "Tensor" }, - { "name": "eps", "type": "float32" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Normalization" }, { "name": "quantized::cat(Tensor[] qx, int dim, float? scale, int? zero_point) -> Tensor", - "category": "Tensor", - "inputs": [ - { "name": "qx", "type": "Tensor[]" }, - { "name": "dim", "type": "int64" }, - { "name": "scale", "type": "float32?" }, - { "name": "zero_point", "type": "int64?" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Tensor" }, { "name": "quantized::cat_relu(Tensor[] qx, int dim, float? scale, int? zero_point) -> Tensor", - "category": "Tensor", - "inputs": [ - { "name": "qx", "type": "Tensor[]" }, - { "name": "dim", "type": "int64" }, - { "name": "scale", "type": "float32?" }, - { "name": "zero_point", "type": "int64?" 
} - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Tensor" }, { "name": "quantized::celu(Tensor self, float output_scale, int output_zero_point, Scalar alpha=1) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "self", "type": "Tensor" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" }, - { "name": "alpha", "type": "Scalar", "default": 1 } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" }, { "name": "quantized::conv1d(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "quantized::conv1d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase", - "inputs": [ - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "stride", "type": "int64[]" }, - { "name": "padding", "type": "int64[]" }, - { "name": "dilation", "type": "int64[]" }, - { "name": "groups", "type": "int64" } - ], - "outputs": [ - { "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ] + "name": "quantized::conv1d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, { "name": "quantized::conv1d_relu(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { "name": "quantized::conv2d(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase weight, int[] stride, int[] padding, int[] dilation, int groups, float output_scale, int output_zero_point) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, - { "name": "stride", "type": "int64[]" }, - { "name": "padding", "type": "int64[]" }, - { "name": "dilation", "type": "int64[]" }, - { "name": "groups", "type": "int64" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { "name": "quantized::conv2d.new(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { - "name": 
"quantized::conv2d_dilation(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]", - "inputs": [ - { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ], - "outputs": [ - { "type": "int64[]" } - ] + "name": "quantized::conv2d_dilation(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]" }, { - "name": "quantized::conv2d_dynamic(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, bool reduce_range=False) -> Tensor", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, - { "name": "reduce_range", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "quantized::conv2d_dynamic(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, bool reduce_range=False) -> Tensor" }, { - "name": "quantized::conv2d_groups(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int", - "inputs": [ - { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "quantized::conv2d_groups(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int" }, { - "name": "quantized::conv2d_output_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]", - "inputs": [ - { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ], - "outputs": [ - { "type": "int64[]" } - ] + "name": "quantized::conv2d_output_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]" }, { - "name": "quantized::conv2d_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]", - "inputs": [ - { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ], - "outputs": [ - { "type": "int64[]" } - ] + "name": "quantized::conv2d_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]" }, { - "name": "quantized::conv2d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase", - "inputs": [ - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "stride", "type": "int64[]" }, - { "name": "padding", "type": "int64[]" }, - { "name": "dilation", "type": "int64[]" }, - { "name": "groups", "type": "int64" } - ], - "outputs": [ - { "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ] + "name": "quantized::conv2d_prepack(Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, { "name": "quantized::conv2d_relu(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase weight, int[] stride, int[] padding, int[] dilation, int groups, float output_scale, int output_zero_point) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, - { "name": "stride", "type": "int64[]" }, - { "name": "padding", "type": "int64[]" }, - { "name": "dilation", "type": "int64[]" }, - { "name": "groups", "type": "int64" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { "name": "quantized::conv2d_relu.new(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "quantized::conv2d_stride(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]", - "inputs": [ - { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ], - "outputs": [ - { "type": "int64[]" } - ] + "name": "quantized::conv2d_stride(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]" }, { - "name": "quantized::conv2d_transpose(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int", - "inputs": [ - { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "quantized::conv2d_transpose(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int" }, { - "name": "quantized::conv2d_unpack(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> (Tensor unpacked_weights, Tensor? B_origin)", - "inputs": [ - { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ], - "outputs": [ - { "name": "unpacked_weights", "type": "Tensor" }, - { "name": "B_origin", "type": "Tensor?" } - ] + "name": "quantized::conv2d_unpack(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> (Tensor unpacked_weights, Tensor? 
B_origin)" }, { - "name": "quantized::conv2d_unpack_sizes(Any packed_weights) -> (Any)", - "inputs": [ - { "name": "packed_weights", "type": "Any" } - ], - "outputs": [ - { "type": "Any" } - ] + "name": "quantized::conv2d_unpack_sizes(Any packed_weights) -> Any" }, { "name": "quantized::conv3d(Tensor qx, __torch__.torch.classes.quantized.Conv3dPackedParamsBase weight, int[] stride, int[] padding, int[] dilation, int groups, float output_scale, int output_zero_point) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "weight", "type": "__torch__.torch.classes.quantized.Conv3dPackedParamsBase" }, - { "name": "stride", "type": "int64[]" }, - { "name": "padding", "type": "int64[]", "default": 1 }, - { "name": "dilation", "type": "int64[]", "default": 0 }, - { "name": "groups", "type": "int64", "default": 1 }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { "name": "quantized::conv3d.new(Tensor qx, __torch__.torch.classes.quantized.Conv3dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv3dPackedParamsBase" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "quantized::conv3d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv3dPackedParamsBase", - "inputs": [ - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "stride", "type": "int64[]" }, - { "name": "padding", "type": "int64[]" }, - { "name": "dilation", "type": "int64[]" }, - { "name": "groups", "type": "int64" } - ], - "outputs": [ - { "type": "__torch__.torch.classes.quantized.Conv3dPackedParamsBase" } - ] + "name": "quantized::conv3d_prepack(Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv3dPackedParamsBase" }, { "name": "quantized::conv3d_relu(Tensor qx, __torch__.torch.classes.quantized.Conv3dPackedParamsBase weight, int[] stride, int[] padding, int[] dilation, int groups, float output_scale, int output_zero_point) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "weight", "type": "__torch__.torch.classes.quantized.Conv3dPackedParamsBase" }, - { "name": "stride", "type": "int64[]" }, - { "name": "padding", "type": "int64[]", "default": 1 }, - { "name": "dilation", "type": "int64[]", "default": 0 }, - { "name": "groups", "type": "int64", "default": 1 }, - { "name": "output_scale", "type": "float32", "default": 1 }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Layer" }, { "name": "quantized::conv3d_relu.new(Tensor qx, __torch__.torch.classes.quantized.Conv3dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor", - "category": "Layer", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv3dPackedParamsBase" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] - }, - { - "name": "quantized::conv_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase", - "inputs": [ - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "stride", "type": "int64[]" }, - { "name": "padding", "type": "int64[]" }, - { "name": "dilation", "type": "int64[]" }, - { "name": "groups", "type": "int64" } - ], - "outputs": [ - { "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ] + "category": "Layer" }, - { - "name": "quantized::conv_transpose1d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] output_padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase", - "inputs": [ - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "stride", "type": "int64[]" }, - { "name": "padding", "type": "int64[]" }, - { "name": "output_padding", "type": "int64[]" }, - { "name": "dilation", "type": "int64[]" }, - { "name": "groups", "type": "int64" } - ], - "outputs": [ - { "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ] + { + "name": "quantized::conv_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, { - "name": "quantized::conv_transpose2d(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "quantized::conv_transpose1d_prepack(Tensor weight, Tensor? 
bias, int[] stride, int[] padding, int[] output_padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, { - "name": "quantized::conv_transpose2d_dilation(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]", - "inputs": [ - { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ], - "outputs": [ - { "type": "int64[]" } - ] + "name": "quantized::conv_transpose2d(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, float output_scale, int output_zero_point) -> Tensor" }, { - "name": "quantized::conv_transpose2d_dynamic(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, bool reduce_range=False) -> Tensor", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "packed_weight", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, - { "name": "reduce_range", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "quantized::conv_transpose2d_dilation(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]" }, { - "name": "quantized::conv_transpose2d_groups(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int", - "inputs": [ - { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "quantized::conv_transpose2d_dynamic(Tensor qx, __torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weight, bool reduce_range=False) -> Tensor" }, { - "name": "quantized::conv_transpose2d_output_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]", - "inputs": [ - { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ], - "outputs": [ - { "type": "int64[]" } - ] + "name": "quantized::conv_transpose2d_groups(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int" }, { - "name": "quantized::conv_transpose2d_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]", - "inputs": [ - { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ], - "outputs": [ - { "type": "int64[]" } - ] + "name": "quantized::conv_transpose2d_output_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]" }, { - "name": "quantized::conv_transpose2d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] output_padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase", - "inputs": [ - { "name": "weight", "type": "Tensor" }, - { "name": "bias", "type": "Tensor?" 
}, - { "name": "stride", "type": "int64[]" }, - { "name": "padding", "type": "int64[]" }, - { "name": "output_padding", "type": "int64[]" }, - { "name": "dilation", "type": "int64[]" }, - { "name": "groups", "type": "int64" } - ], - "outputs": [ - { "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ] + "name": "quantized::conv_transpose2d_padding(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]" }, { - "name": "quantized::conv_transpose2d_stride(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]", - "inputs": [ - { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ], - "outputs": [ - { "type": "int64[]" } - ] + "name": "quantized::conv_transpose2d_prepack(Tensor weight, Tensor? bias, int[] stride, int[] padding, int[] output_padding, int[] dilation, int groups) -> __torch__.torch.classes.quantized.Conv2dPackedParamsBase" }, { - "name": "quantized::conv_transpose2d_transpose(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int", - "inputs": [ - { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ], - "outputs": [ - { "type": "int64" } - ] + "name": "quantized::conv_transpose2d_stride(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int[]" }, { - "name": "quantized::conv_transpose2d_unpack(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> (Tensor unpacked_weights, Tensor? B_origin)", - "inputs": [ - { "name": "packed_weights", "type": "__torch__.torch.classes.quantized.Conv2dPackedParamsBase" } - ], - "outputs": [ - { "name": "unpacked_weights", "type": "Tensor" }, - { "name": "B_origin", "type": "Tensor?" } - ] + "name": "quantized::conv_transpose2d_transpose(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> int" }, { - "name": "quantized::embedding_bag_4bit_rowwise_offsets(Tensor weight, Tensor indices, Tensor? offsets=None, bool scale_grad_by_freq=False, int mode=0, bool pruned_weights=False, Tensor? per_sample_weights=None, Tensor? compressed_indices_mapping=None, bool include_last_offset=False) -> Tensor", - "inputs": [ - { "name": "weight", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" }, - { "name": "offsets", "type": "Tensor?", "default": null }, - { "name": "scale_grad_by_freq", "type": "boolean", "default": false }, - { "name": "mode", "type": "int64", "default": 0 }, - { "name": "pruned_weights", "type": "boolean", "default": false }, - { "name": "per_sample_weights", "type": "Tensor?", "default": null }, - { "name": "compressed_indices_mapping", "type": "Tensor?", "default": null }, - { "name": "include_last_offset", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "quantized::conv_transpose2d_unpack(__torch__.torch.classes.quantized.Conv2dPackedParamsBase packed_weights) -> (Tensor unpacked_weights, Tensor? B_origin)" }, { - "name": "quantized::embedding_bag_byte_rowwise_offsets(Tensor weight, Tensor indices, Tensor? offsets=None, bool scale_grad_by_freq=False, int mode=0, bool pruned_weights=False, Tensor? per_sample_weights=None, Tensor? 
compressed_indices_mapping=None, bool include_last_offset=False) -> Tensor", - "inputs": [ - { "name": "weight", "type": "Tensor" }, - { "name": "indices", "type": "Tensor" }, - { "name": "offsets", "type": "Tensor?", "default": null }, - { "name": "scale_grad_by_freq", "type": "boolean", "default": false }, - { "name": "mode", "type": "int64", "default": 0 }, - { "name": "pruned_weights", "type": "boolean", "default": false }, - { "name": "per_sample_weights", "type": "Tensor?", "default": null }, - { "name": "compressed_indices_mapping", "type": "Tensor?", "default": null }, - { "name": "include_last_offset", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "quantized::embedding_bag_4bit_rowwise_offsets(Tensor weight, Tensor indices, Tensor? offsets=None, bool scale_grad_by_freq=False, int mode=0, bool pruned_weights=False, Tensor? per_sample_weights=None, Tensor? compressed_indices_mapping=None, bool include_last_offset=False) -> Tensor" }, { - "name": "quantized::embedding_byte(__torch__.torch.classes.quantized.EmbeddingPackedParamsBase weight, Tensor indices, bool pruned_weights=False) -> Tensor", - "inputs": [ - { "name": "weight", "type": "__torch__.torch.classes.quantized.EmbeddingPackedParamsBase" }, - { "name": "indices", "type": "Tensor" }, - { "name": "pruned_weights", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "quantized::embedding_bag_byte_rowwise_offsets(Tensor weight, Tensor indices, Tensor? offsets=None, bool scale_grad_by_freq=False, int mode=0, bool pruned_weights=False, Tensor? per_sample_weights=None, Tensor? compressed_indices_mapping=None, bool include_last_offset=False) -> Tensor" + }, + { + "name": "quantized::embedding_byte(__torch__.torch.classes.quantized.EmbeddingPackedParamsBase weight, Tensor indices, bool pruned_weights=False) -> Tensor" }, { "name": "quantized::hardswish(Tensor input, float output_scale, int output_zero_point) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" }, { - "name": "quantized::instance_norm(Tensor input, Tensor? weight, Tensor? bias, float eps, float output_scale, int output_zero_point) -> Tensor", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "weight", "type": "Tensor?" }, - { "name": "bias", "type": "Tensor?" }, - { "name": "eps", "type": "float32" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "quantized::instance_norm(Tensor input, Tensor? weight, Tensor? bias, float eps, float output_scale, int output_zero_point) -> Tensor" }, { "name": "quantized::layer_norm(Tensor input, int[] normalized_shape, Tensor? weight, Tensor? bias, float eps, float output_scale, int output_zero_point) -> Tensor", - "category": "Normalization", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "normalized_shape", "type": "int64[]" }, - { "name": "weight", "type": "Tensor?" }, - { "name": "bias", "type": "Tensor?" 
}, - { "name": "eps", "type": "float32" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Normalization" }, { "name": "quantized::leaky_relu(Tensor qx, Scalar negative_slope, bool inplace, float output_scale, int output_zero_point) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "negative_slope", "type": "Scalar", "default": false }, - { "name": "inplace", "type": "boolean" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" }, { "name": "quantized::linear(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack, float Y_scale_i, int Y_zero_point_i) -> Tensor Y", - "category": "Layer", - "inputs": [ - { "name": "X", "type": "Tensor" }, - { "name": "W_prepack", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, - { "name": "Y_scale_i", "type": "float32" }, - { "name": "Y_zero_point_i", "type": "int64" } - ], - "outputs": [ - { "name": "Y", "type": "Tensor" } - ] + "category": "Layer" }, { "name": "quantized::linear_dynamic(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack, bool reduce_range=False) -> Tensor Y", - "category": "Layer", - "inputs": [ - { "name": "X", "type": "Tensor" }, - { "name": "W_prepack", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, - { "name": "reduce_range", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "Y", "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "quantized::linear_prepack_fp16(Tensor W, Tensor? B=None) -> __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack", - "inputs": [ - { "name": "W", "type": "Tensor" }, - { "name": "B", "type": "Tensor?", "default": null } - ], - "outputs": [ - { "name": "W_prepack", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" } - ] + "name": "quantized::linear_prepack_fp16(Tensor W, Tensor? B=None) -> __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack" }, { - "name": "quantized::linear_prepack_fp16_legacy(Tensor W, Tensor? B=None) -> Tensor W_prepack", - "inputs": [ - { "name": "W", "type": "Tensor" }, - { "name": "B", "type": "Tensor?", "default": null } - ], - "outputs": [ - { "name": "W_prepack", "type": "Tensor" } - ] + "name": "quantized::linear_prepack_fp16_legacy(Tensor W, Tensor? 
B=None) -> Tensor W_prepack" }, { "name": "quantized::linear_relu(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack, float Y_scale_i, int Y_zero_point_i) -> Tensor Y", - "category": "Layer", - "inputs": [ - { "name": "X", "type": "Tensor" }, - { "name": "W_prepack", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, - { "name": "Y_scale_i", "type": "float32" }, - { "name": "Y_zero_point_i", "type": "int64" } - ], - "outputs": [ - { "name": "Y", "type": "Tensor" } - ] + "category": "Layer" }, { "name": "quantized::linear_relu_dynamic(Tensor X, __torch__.torch.classes.quantized.LinearPackedParamsBase W_prepack, bool reduce_range=False) -> Tensor Y", - "category": "Layer", - "inputs": [ - { "name": "X", "type": "Tensor" }, - { "name": "W_prepack", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, - { "name": "reduce_range", "type": "boolean", "default": false } - ], - "outputs": [ - { "name": "Y", "type": "Tensor" } - ] + "category": "Layer" }, { - "name": "quantized::make_quantized_cell_params(Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh) -> __torch__.torch.classes.rnn.CellParamsBase", - "inputs": [ - { "name": "w_ih", "type": "Tensor" }, - { "name": "w_hh", "type": "Tensor" }, - { "name": "b_ih", "type": "Tensor" }, - { "name": "b_hh", "type": "Tensor" } - ], - "outputs": [ - { "type": "__torch__.torch.classes.rnn.CellParamsBase" } - ] + "name": "quantized::make_quantized_cell_params(Tensor w_ih, Tensor w_hh, Tensor b_ih, Tensor b_hh) -> __torch__.torch.classes.rnn.CellParamsBase" }, { - "name": "quantized::make_quantized_cell_params_dynamic(__torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor bias_ih, Tensor bias_hh, bool reduce_range=False) -> __torch__.torch.classes.rnn.CellParamsBase", - "inputs": [ - { "name": "w_ih", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, - { "name": "w_hh", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, - { "name": "bias_ih", "type": "Tensor" }, - { "name": "bias_hh", "type": "Tensor" }, - { "name": "reduce_range", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "__torch__.torch.classes.rnn.CellParamsBase" } - ] + "name": "quantized::make_quantized_cell_params_dynamic(__torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor bias_ih, Tensor bias_hh, bool reduce_range=False) -> __torch__.torch.classes.rnn.CellParamsBase" }, { - "name": "quantized::make_quantized_cell_params_fp16(__torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh) -> __torch__.torch.classes.rnn.CellParamsBase", - "inputs": [ - { "name": "w_ih", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, - { "name": "w_hh", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" } - ], - "outputs": [ - { "type": "__torch__.torch.classes.rnn.CellParamsBase" } - ] + "name": "quantized::make_quantized_cell_params_fp16(__torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh) -> __torch__.torch.classes.rnn.CellParamsBase" }, { - "name": "quantized::mul(Tensor qa, Tensor qb, float scale, int zero_point)-> Tensor qc", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "qb", "type": "Tensor" }, - { "name": "scale", "type": "float32" }, - { 
"name": "zero_point", "type": "int64" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::mul(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc" }, { - "name": "quantized::mul.Scalar(Tensor qa, Scalar b)-> Tensor qc", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::mul.Scalar(Tensor qa, Scalar b) -> Tensor qc" }, { - "name": "quantized::mul.Scalar2(Scalar b, Tensor qa)-> Tensor qc", - "inputs": [ - { "name": "b", "type": "Scalar" }, - { "name": "qa", "type": "Tensor" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::mul.Scalar2(Scalar b, Tensor qa) -> Tensor qc" }, { - "name": "quantized::mul.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out)-> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Scalar" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::mul.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::mul.out(Tensor qa, Tensor qb, Tensor(a!) out)-> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "qb", "type": "Tensor" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::mul.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::mul_out(Tensor qa, Tensor qb, Tensor(a!) out)-> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "qb", "type": "Tensor" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::mul_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::mul_relu(Tensor qa, Tensor qb, float scale, int zero_point)-> Tensor qc", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "qb", "type": "Tensor" }, - { "name": "scale", "type": "float32" }, - { "name": "zero_point", "type": "int64" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::mul_relu(Tensor qa, Tensor qb, float scale, int zero_point) -> Tensor qc" }, { - "name": "quantized::mul_relu.Scalar(Tensor qa, Scalar b)-> Tensor qc", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::mul_relu.Scalar(Tensor qa, Scalar b) -> Tensor qc" }, { - "name": "quantized::mul_relu.Scalar2(Scalar b, Tensor qa)-> Tensor qc", - "inputs": [ - { "name": "b", "type": "Scalar" }, - { "name": "qa", "type": "Tensor" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::mul_relu.Scalar2(Scalar b, Tensor qa) -> Tensor qc" }, { - "name": "quantized::mul_relu.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out)-> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Scalar" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::mul_relu.Scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::mul_relu.out(Tensor qa, Tensor qb, Tensor(a!) out)-> Tensor(a!) 
out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "qb", "type": "Tensor" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::mul_relu.out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::mul_relu_out(Tensor qa, Tensor qb, Tensor(a!) out)-> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "qb", "type": "Tensor" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::mul_relu_out(Tensor qa, Tensor qb, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::mul_scalar(Tensor qa, Scalar b)-> Tensor qc", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::mul_scalar(Tensor qa, Scalar b) -> Tensor qc" }, { - "name": "quantized::mul_scalar.Tensor(Tensor qa, Tensor b)-> Tensor qc", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Tensor" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::mul_scalar.Tensor(Tensor qa, Tensor b) -> Tensor qc" }, { - "name": "quantized::mul_scalar_out(Tensor qa, Scalar b, Tensor(a!) out)-> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Scalar" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::mul_scalar_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::mul_scalar_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out)-> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Tensor" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::mul_scalar_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::mul_scalar_relu(Tensor qa, Scalar b)-> Tensor qc", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Scalar" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::mul_scalar_relu(Tensor qa, Scalar b) -> Tensor qc" }, { - "name": "quantized::mul_scalar_relu.Tensor(Tensor qa, Tensor b)-> Tensor qc", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Tensor" } - ], - "outputs": [ - { "name": "qc", "type": "Tensor" } - ] + "name": "quantized::mul_scalar_relu.Tensor(Tensor qa, Tensor b) -> Tensor qc" }, { - "name": "quantized::mul_scalar_relu_out(Tensor qa, Scalar b, Tensor(a!) out)-> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Scalar" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::mul_scalar_relu_out(Tensor qa, Scalar b, Tensor(a!) out) -> Tensor(a!) out" }, { - "name": "quantized::mul_scalar_relu_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out)-> Tensor(a!) out", - "inputs": [ - { "name": "qa", "type": "Tensor" }, - { "name": "b", "type": "Tensor" }, - { "name": "out", "type": "Tensor" } - ], - "outputs": [ - { "name": "out", "type": "Tensor" } - ] + "name": "quantized::mul_scalar_relu_out.Tensor(Tensor qa, Tensor b, Tensor(a!) out) -> Tensor(a!) 
out" }, { - "name": "quantized::prelu(Tensor qx, Tensor weight, float output_scale, int output_zero_point) -> Tensor", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "weight", "type": "Tensor" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "quantized::prelu(Tensor qx, Tensor weight, float output_scale, int output_zero_point) -> Tensor" }, { - "name": "quantized::quantized_gru_cell_dynamic(Tensor input, Tensor hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor b_ih, Tensor b_hh) -> Tensor", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "hx", "type": "Tensor" }, - { "name": "w_ih", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, - { "name": "w_hh", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, - { "name": "b_ih", "type": "Tensor" }, - { "name": "b_hh", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "quantized::quantized_gru_cell_dynamic(Tensor input, Tensor hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor b_ih, Tensor b_hh) -> Tensor" }, { - "name": "quantized::quantized_lstm_cell_dynamic(Tensor input, Tensor[] hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor bias_ih, Tensor bias_hh) -> (Tensor, Tensor)", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "hx", "type": "Tensor[]" }, - { "name": "w_ih", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, - { "name": "w_hh", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, - { "name": "bias_ih", "type": "Tensor" }, - { "name": "bias_hh", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "Tensor" } - ] + "name": "quantized::quantized_lstm_cell_dynamic(Tensor input, Tensor[] hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor bias_ih, Tensor bias_hh) -> (Tensor, Tensor)" }, { - "name": "quantized::quantized_rnn_relu_cell_dynamic(Tensor input, Tensor hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor b_ih, Tensor b_hh) -> Tensor", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "hx", "type": "Tensor" }, - { "name": "w_ih", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, - { "name": "w_hh", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, - { "name": "b_ih", "type": "Tensor" }, - { "name": "b_hh", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "quantized::quantized_rnn_relu_cell_dynamic(Tensor input, Tensor hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor b_ih, Tensor b_hh) -> Tensor" }, { - "name": "quantized::quantized_rnn_tanh_cell_dynamic(Tensor input, Tensor hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor b_ih, Tensor b_hh) -> Tensor", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "hx", "type": "Tensor" }, - { "name": "w_ih", "type": 
"__torch__.torch.classes.quantized.LinearPackedParamsBase" }, - { "name": "w_hh", "type": "__torch__.torch.classes.quantized.LinearPackedParamsBase" }, - { "name": "b_ih", "type": "Tensor" }, - { "name": "b_hh", "type": "Tensor" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "quantized::quantized_rnn_tanh_cell_dynamic(Tensor input, Tensor hx, __torch__.torch.classes.quantized.LinearPackedParamsBase w_ih, __torch__.torch.classes.quantized.LinearPackedParamsBase w_hh, Tensor b_ih, Tensor b_hh) -> Tensor" }, { "name": "quantized::relu6(Tensor qx, bool inplace=False) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "inplace", "type": "boolean", "default": false } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" }, { "name": "quantized::sigmoid(Tensor qx, float output_scale, int output_zero_point) -> Tensor", - "category": "Activation", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "category": "Activation" }, { - "name": "quantized::softmax(Tensor qx, int dim, float output_scale, int output_zero_point) -> Tensor", - "inputs": [ - { "name": "qx", "type": "Tensor" }, - { "name": "dim", "type": "int64" }, - { "name": "output_scale", "type": "float32" }, - { "name": "output_zero_point", "type": "int64" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "quantized::softmax(Tensor qx, int dim, float output_scale, int output_zero_point) -> Tensor" }, { "name": "torch.nn.modules.activation.ELU", @@ -20411,42 +6846,12 @@ "category": "Data" }, { - "name": "torchaudio::sox_effects_apply_effects_tensor(Tensor tensor, int sample_rate, str[][] effects, bool channels_first=True) -> (Tensor, int)", - "inputs": [ - { "name": "tensor", "type": "Tensor" }, - { "name": "sample_rate", "type": "int64" }, - { "name": "effects", "type": "string[][]" }, - { "name": "channels_first", "type": "boolean", "default": true } - ], - "outputs": [ - { "type": "Tensor" }, - { "type": "int64" } - ] + "name": "torchaudio::sox_effects_apply_effects_tensor(Tensor tensor, int sample_rate, str[][] effects, bool channels_first=True) -> (Tensor, int)" }, { - "name": "torchvision::nms(Tensor dets, Tensor scores, float iou_threshold) -> Tensor", - "inputs": [ - { "name": "dets", "type": "Tensor" }, - { "name": "scores", "type": "Tensor" }, - { "name": "iou_threshold", "type": "float32" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "torchvision::nms(Tensor dets, Tensor scores, float iou_threshold) -> Tensor" }, { - "name": "torchvision::roi_align(Tensor input, Tensor rois, float spatial_scale, int pooled_height, int pooled_width, int sampling_ratio, bool aligned) -> Tensor", - "inputs": [ - { "name": "input", "type": "Tensor" }, - { "name": "rois", "type": "Tensor" }, - { "name": "spatial_scale", "type": "float32" }, - { "name": "pooled_height", "type": "int64" }, - { "name": "pooled_width", "type": "int64" }, - { "name": "sampling_ratio", "type": "int64" }, - { "name": "aligned", "type": "boolean" } - ], - "outputs": [ - { "type": "Tensor" } - ] + "name": "torchvision::roi_align(Tensor input, Tensor rois, float spatial_scale, int pooled_height, int pooled_width, int sampling_ratio, bool aligned) -> Tensor" } ] \ No newline at end of file diff --git a/source/pytorch.js b/source/pytorch.js index 28baf928a5..c1175c0a38 100644 --- a/source/pytorch.js +++ b/source/pytorch.js @@ 
@@ -156,7 +156,7 @@ pytorch.Graph = class {
                 if (node.kind() === 'prim::ListConstruct') {
                     if (node.outputs().length === 1 &&
                         node.outputs().every((output) => output.uses().length === 1) &&
-                        node.inputs().every((input) => pytorch.Utility.isTensor(input.value))) {
+                        node.inputs().every((input) => pytorch.Utility.isTensor(input.value) || pytorch.Utility.isInstance(input, 'torch.Value'))) {
                         continue;
                     }
                     if (node.inputs().length === 0 &&
@@ -476,10 +476,11 @@ pytorch.Node = class {
                     if (Array.isArray(input.value) && input.value.every((value) => pytorch.Utility.isTensor(value))) {
                         continue;
                     }
-                    argument = new pytorch.Argument(name, input.value, 'attribute');
+                    const type = input.type() ? pytorch.Utility.toType(input.type()) : null;
+                    argument = new pytorch.Argument(name, input.value, type || 'attribute');
                 } else if (pytorch.Utility.isInstance(input.type(), 'torch.ListType')) {
                     if (input.node() && input.node().kind() === 'prim::ListConstruct' && input.uses().length === 1 &&
-                        input.node().inputs().every((value) => pytorch.Utility.isInstance(value.type(), 'torch.IntType') || pytorch.Utility.isInstance(value.type(), 'torch.FloatType') || pytorch.Utility.isInstance(value.type(), 'torch.StringType') || pytorch.Utility.isInstance(value.type(), 'torch.ComplexType') || pytorch.Utility.isInstance(value.type(), 'torch.TensorType'))) {
+                        input.node().inputs().every((value) => pytorch.Utility.isInstance(value, 'torch.Value') || pytorch.Utility.isInstance(value.type(), 'torch.IntType') || pytorch.Utility.isInstance(value.type(), 'torch.FloatType') || pytorch.Utility.isInstance(value.type(), 'torch.StringType') || pytorch.Utility.isInstance(value.type(), 'torch.ComplexType') || pytorch.Utility.isInstance(value.type(), 'torch.TensorType'))) {
                         const list = input.node().inputs();
                         const args = list.map((value) => {
                             if (value.uses().length === 1 && value.node() === input.node() && value.value !== undefined) {
@@ -505,7 +506,7 @@ pytorch.Node = class {
                     }
                 } else if (pytorch.Utility.isTensor(input.value) || input.value === undefined || input.value === null) {
                     let list = [input];
-                    if (input.node() &&
+                    if (input.node() && node !== input.node() &&
                         input.node().kind() === 'prim::ListConstruct' && input.uses().length === 1 &&
                         input.node().inputs().every((input) => pytorch.Utility.isTensor(input.value))) {
@@ -1732,6 +1733,8 @@ pytorch.Execution = class extends python.Execution {
                 } else if (pytorch.Utility.isTensor(result)) {
                     const value = execution.variable(result);
                     execution.graph.return_node().addInput(value);
+                } else if (result instanceof torch.Value) {
+                    execution.graph.return_node().addInput(result);
                 } else if (Object(result) === result) {
                     for (const key of Object.keys(result)) {
                         const item = result[key];
@@ -2184,18 +2187,18 @@ pytorch.jit.Execution = class extends pytorch.Execution {
                 return this._blocks;
             }
             addInput(value) {
-                const use = execution.invoke('torch.Use', [this]);
+                const use = new torch.Use(this);
                 value.uses().push(use);
                 this._inputs.push(value);
                 return value;
             }
             addOutput() {
-                const value = execution.invoke('torch.Value', [this]);
+                const value = new torch.Value(this);
                 this._outputs.push(value);
                 return value;
             }
             addBlock() {
-                const block = execution.invoke('torch.Block', [this._graph, this]);
+                const block = new torch.Block(this._graph, this);
                 this._blocks.push(block);
                 return block;
             }
@@ -2669,7 +2672,7 @@ pytorch.jit.Execution = class extends pytorch.Execution {
                     node.addInput(value);
                     output.setType(torch.ListType.get(torch.StringType.get()));
                 } else if (pytorch.Utility.isTensor(item)) {
-                    const value = this.variable(item, node);
+                    const value = this.variable(item, null);
                     node.addInput(value);
                     output.setType(torch.ListType.get(torch.TensorType.get()));
                 } else {
@@ -2738,12 +2741,13 @@ pytorch.jit.Execution = class extends pytorch.Execution {
                         throw new python.Error(`Unsupported dict item type '${pair.type}'.`);
                     }
                     const key = this.expression(pair.key, context);
-                    const keyValue = this.variable(key, node);
+                    const keyValue = this.variable(key, null);
                     keyType = keyValue.type();
                     const value = this.expression(pair.value, context);
-                    const valueValue = this.variable(value, node);
+                    const valueValue = this.variable(value, null);
                     valueType = valueValue.type();
-                    this.variable(value, node);
+                    node.addInput(keyValue);
+                    node.addInput(valueValue);
                 }
                 const output = node.addOutput();
                 if (keyType && valueType) {
@@ -2896,78 +2900,61 @@ pytorch.jit.Execution = class extends pytorch.Execution {
             return super.call(target, name, args, context);
         }
         const [schema, evalArgs] = overload;
-        const copyArgs = Array.prototype.slice.call(args);
-        const copyEvalArgs = Array.prototype.slice.call(evalArgs);
-        const index = schema.name.indexOf('(');
-        const op = index === -1 ? schema.name : schema.name.substring(0, index);
+        const op = schema.overload_name ? `${schema.name}.${schema.overload_name}` : schema.name;
         const node = this._graph.create(op);
         const referencedParameters = [];
-        const parameters = Array.prototype.slice.call(schema.inputs || []).concat(Array.prototype.slice.call(schema.attributes || []));
-        const types = new Map();
-        while (copyEvalArgs.length > 0) {
-            if (parameters.length <= 0) {
-                if (schema.is_vararg || schema.name.startsWith('_caffe2::')) {
+        const parameters = schema.arguments;
+        const varTypes = new Map();
+        varTypes.map = function(type) {
+            if (type.kind() === 'VarType') {
+                const key = type.annotation_str();
+                if (!varTypes.has(key)) {
+                    throw new pytorch.Error(`Unknown var type '${key}'.`);
+                }
+                return varTypes.get(key);
+            }
+            return type;
+        };
+        let position = 0;
+        let index = 0;
+        while (position < evalArgs.length) {
+            if (index >= parameters.length) {
+                if (schema.name.startsWith('_caffe2::') || schema.is_vararg) {
                     break;
                 }
                 throw new pytorch.Error('Invalid parameter length.');
             }
-            if (copyArgs.every((arg) => arg.type === '=' && arg.target && arg.target.type === 'id') &&
-                parameters.every((parameter) => parameter.type !== 'Tensor' && parameter.type !== 'Tensor[]')) {
-                const map = new Map(parameters.map((parameter) => [parameter.name, parameter]));
-                while (copyArgs.length > 0) {
-                    const argument = copyArgs.shift();
-                    const arg = copyEvalArgs.shift();
-                    const parameter = map.get(argument.target.value);
-                    if (!parameter) {
-                        throw new pytorch.Error();
-                    }
-                    let type = parameter.type;
-                    let optional = false;
-                    if (type.endsWith('?')) {
-                        type = parameter.type.substring(0, parameter.type.length - 1);
-                        optional = true;
-                    }
-                    if (!this.isType(arg, type)) {
-                        if (optional) {
-                            continue;
-                        }
-                        throw new pytorch.Error();
-                    }
-                    const value = this.variable(arg);
-                    value.value = arg;
-                    node.addInput(value);
-                }
-                continue;
+            const arg = parameters[index];
+            if (arg.kwarg_only) {
+                break;
             }
-            const parameter = parameters.shift();
-            const [argument] = copyEvalArgs;
+            index++;
+            const v = evalArgs[position];
             let match = false;
             let input = null;
             let optional = false;
-            let type = parameter.type;
-            if (type.endsWith('?')) {
-                type = parameter.type.slice(0, -1);
+            let type = arg.real_type;
+            if (type instanceof torch.OptionalType) {
+                type = type.getElementType();
                 optional = true;
            }
            if (optional === true &&
-                (type === 'float32' || type === 'boolean' || type === 'int64' || type === 'complex') &&
-                argument instanceof torch.Value && argument.type() instanceof torch.NoneType) {
-                copyArgs.shift();
-                copyEvalArgs.shift();
-                input = argument;
+                (type instanceof torch.FloatType || type instanceof torch.BoolType || type instanceof torch.IntType || type instanceof torch.ComplexType || type.kind() === 'ScalarTypeType' || type instanceof torch.DeviceObjType || type.kind() === 'LayoutKind') &&
+                v instanceof torch.Value && v.type() instanceof torch.NoneType) {
+                position++;
+                input = v;
                 match = true;
-            } else if (type === 'Tensor[]') {
-                const [argument] = copyEvalArgs;
-                if ((argument instanceof torch.Value && pytorch.Utility.toType(argument.type()) === 'Tensor[]') ||
-                    (Array.isArray(argument) && argument.every((item) => pytorch.Utility.isTensor(item) || item === null || (item instanceof torch.Value && item.type() instanceof torch.TensorType)))) {
-                    copyArgs.shift();
-                    copyEvalArgs.shift();
-                    if (argument instanceof torch.Value) {
-                        input = argument;
+            } else if (type instanceof torch.ListType && type.getElementType() instanceof torch.TensorType) {
+                const v = evalArgs[position];
+                if ((v instanceof torch.Value && v.type() instanceof torch.ListType && v.type().getElementType() instanceof torch.TensorType) ||
+                    (Array.isArray(v) && v.every((item) => pytorch.Utility.isTensor(item) || item === null || (item instanceof torch.Value && item.type() instanceof torch.TensorType)))) {
+                    position++;
+                    if (v instanceof torch.Value) {
+                        input = v;
                         match = true;
                     } else {
                         const list = this._graph.create('prim::ListConstruct');
-                        for (const arg of argument) {
+                        for (const arg of v) {
                             const tensor = arg;
                             if (tensor) {
                                 tensor.__count__ = (tensor.__count__ || 0) + 1;
@@ -2987,281 +2974,108 @@ pytorch.jit.Execution = class extends pytorch.Execution {
                     }
                     throw new pytorch.Error();
                 }
+            } else if (!this.isNativeType(v, type, arg.N) && v !== null) {
+                if (optional) {
+                    continue;
+                }
+                throw new pytorch.Error('Invalid argument type.');
+            } else if (args[position].type === '=' && args[position].target.value !== arg.name) {
+                throw new pytorch.Error('Expected named argument.');
             } else {
-                const [arg] = copyArgs;
-                if (!this.isType(argument, type) && argument !== null) {
-                    if (optional) {
-                        continue;
-                    }
-                    throw new pytorch.Error('Invalid argument type.');
-                } else if (arg.type === '=') {
-                    throw new pytorch.Error('Expected named argument.');
+                position++;
+                if (v instanceof torch.Value) {
+                    input = v;
+                    match = true;
                 } else {
-                    copyArgs.shift();
-                    copyEvalArgs.shift();
-                    if (pytorch.Utility.isInstance(argument, 'torch.Value')) {
-                        input = argument;
-                        match = true;
-                    } else {
-                        const value = this.variable(argument);
-                        value.value = argument;
-                        input = value;
-                        match = true;
-                    }
+                    const value = this.variable(v);
+                    value.value = v;
+                    input = value;
+                    match = true;
                 }
             }
             if (match) {
                 node.addInput(input);
-                if (type === 't') {
-                    const key = type;
+                if (type.kind() === 'VarType') {
+                    const key = type.annotation_str();
                     if (input instanceof torch.Value && input.type()) {
-                        types.set(key, input.type());
+                        varTypes.set(key, input.type());
                     } else if (input instanceof torch.Value && Number.isInteger(input.value)) {
-                        types.set(key, torch.IntType.get());
+                        varTypes.set(key, torch.IntType.get());
                     }
                     // throw new pytorch.Error("Unknown value type 't'.");
                 }
-                if (type === 't[]') {
-                    const key = type.replace('[]', '');
+                if (type instanceof torch.ListType && type.getElementType().kind() === 'VarType') {
+                    const key = type.getElementType().annotation_str();
                     if (input instanceof torch.Value && input.type() instanceof torch.OptionalType && input.type().getElementType() instanceof torch.ListType) {
-                        types.set(key, input.type().getElementType().getElementType());
+                        varTypes.set(key, input.type().getElementType().getElementType());
                     } else if (input instanceof torch.Value && input.type() instanceof torch.ListType) {
-                        types.set(key, input.type().getElementType());
+                        varTypes.set(key, input.type().getElementType());
                     } else if (Array.isArray(input) && input.length > 0 && input.every((item) => Number.isInteger(item))) {
-                        types.set(key, torch.IntType.get());
+                        varTypes.set(key, torch.IntType.get());
                     } else if (input.value && Array.isArray(input.value) && input.value.length > 0 && input.value.every((item) => Number.isInteger(item) || isNaN(item))) {
-                        types.set(key, torch.IntType.get());
+                        varTypes.set(key, torch.IntType.get());
                     } else if (input.value && Array.isArray(input.value) && input.value.length > 0 && input.value.every((item) => pytorch.Utility.isTensor(item))) {
-                        types.set(key, torch.TensorType.get());
+                        varTypes.set(key, torch.TensorType.get());
                     } else {
                         // throw new pytorch.Error("Unknown value type 't'.");
                         continue;
                     }
                }
-                const dict = type.match(/^Dict\((\w+),\s*(\w+)\)$/);
-                if (dict && (dict[2] === 't' || dict[2] === 'tVal')) {
+                if (type instanceof torch.DictType && type.getValueType().kind() === 'VarType') {
+                    const key = type.getValueType().annotation_str();
                     if (input instanceof torch.Value && input.type() instanceof torch.DictType) {
-                        types.set(dict[2], input.type().getValueType());
+                        varTypes.set(key, input.type().getValueType());
                     } else if (input.value && Object.values(input.value).every((item) => pytorch.Utility.isTensor(item))) {
-                        types.set(dict[2], input.type().getValueType());
+                        varTypes.set(key, input.type().getValueType());
                     } else {
                         throw new pytorch.Error("Unknown dict type 't[]'.");
                     }
                }
-                const tuple = type.match(/^\((\w+),\s*(\w+)\)\[\]$/);
-                if (tuple && (tuple[2] === 'tVal')) {
+                if (type instanceof torch.ListType && type.getElementType() instanceof torch.TupleType && type.getElementType().elements().length === 2 && type.getElementType().elements()[1].kind() === 'VarType') {
+                    const key = type.getElementType().elements()[1].annotation_str();
                     if (input instanceof torch.Value && input.type() instanceof torch.ListType && input.type().getElementType() instanceof torch.TupleType) {
                         const elements = input.type().getElementType().elements();
                         if (elements.length === 2) {
-                            types.set(tuple[2], elements[1]);
+                            varTypes.set(key, elements[1]);
                        }
                    }
                }
            }
        }
+        if (args.every((arg, index) => index < position || (arg.type === '=' && arg.target && arg.target.type === 'id'))) {
+            const params = new Map(parameters.slice(index).map((a) => [a.name, a]));
+            while (position < args.length) {
+                const v = evalArgs[position];
+                const arg = params.get(args[position].target.value);
+                position++;
+                if (!arg) {
+                    throw new pytorch.Error();
+                }
+                let type = arg.real_type;
+                let optional = false;
+                if (type instanceof torch.OptionalType) {
+                    type = type.getElementType();
+                    optional = true;
+                }
+                if (!this.isNativeType(v, type)) {
+                    if (optional) {
+                        continue;
+                    }
+                    throw new pytorch.Error();
+                }
+                const value = this.variable(v);
+                value.value = v;
+                node.addInput(value);
+            }
+        }
         const result = [];
-        for (let i = 0; i < schema.outputs.length; i++) {
-            const parameter = schema.outputs[i];
-            const type = parameter.type;
-            switch (type) {
+        for (let i = 0; i < schema.returns.length; i++) {
+            const arg = schema.returns[i];
+            const type = arg.real_type;
+            switch (type.str()) {
                 case 'Tensor': {
-                    const output = this.invoke('torch.Tensor', []);
+                    const output = this.createTensorOutput(schema.name, evalArgs, i);
                     output.__origin__ = schema.name;
-                    if (i === 0) {
-                        switch (schema.name) {
-                            case 'aten::conv1d':
-                            case 'aten::embedding': {
-                                output.resize_([NaN, NaN, NaN]);
-                                break;
-                            }
-                            case 'aten::cat':
-                            case 'aten::conv2d':
-                            case 'aten::dropout':
-                            case 'aten::flatten':
-                            case 'aten::flatten.named_out_dim':
-                            case 'aten::max_pool2d':
-                            case 'aten::adaptive_avg_pool2d':
-                            case 'aten::avg_pool2d':
-                            case 'aten::quantize_per_tensor':
-                            case 'aten::relu_':
-                            case 'aten::prelu':
-                            case 'aten::hardtanh_':
-                            case 'aten::upsample_bilinear2d':
-                            case 'prepacked::conv2d_clamp_run': {
-                                const [input] = evalArgs;
-                                if (pytorch.Utility.isTensor(input) && input.size() === undefined) {
-                                    input.resize_([NaN, NaN, NaN, NaN]);
-                                }
-                                output.resize_([NaN, NaN, NaN, NaN]);
-                                break;
-                            }
-                            case 'aten::slice':
-                            case 'aten::slice.Tensor': {
-                                const [input] = evalArgs;
-                                if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) {
-                                    const size = input.size();
-                                    output.resize_(size);
-                                }
-                                break;
-                            }
-                            case 'aten::to':
-                            case 'aten::to.device':
-                            case 'aten::to.dtype':
-                            case 'aten::to.dtype_layout': {
-                                const [input] = evalArgs;
-                                if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) {
-                                    const size = input.size();
-                                    output.resize_(size);
-                                }
-                                break;
-                            }
-                            case 'aten::conv3d': {
-                                output.resize_([NaN, NaN, NaN, NaN, NaN]);
-                                break;
-                            }
-                            case 'aten::roll':
-                            case 'aten::detach':
-                            case 'aten::mean':
-                            case 'aten::mul':
-                            case 'aten::mul.Scalar':
-                            case 'aten::div':
-                            case 'aten::div.Scalar':
-                            case 'aten::batch_norm':
-                            case 'aten::gelu':
-                            case 'aten::relu':
-                            case 'aten::clamp':
-                            case 'aten::clamp_':
-                            case 'aten::_add_relu_':
-                            case 'aten::hardswish_': {
-                                const [input] = evalArgs;
-                                if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) {
-                                    output.resize_(input.size());
-                                }
-                                break;
-                            }
-                            case 'aten::add':
-                            case 'aten::add.Scalar':
-                            case 'aten::sub':
-                            case 'aten::sub.Scalar': {
-                                const [input] = evalArgs;
-                                if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) {
-                                    output.resize_(input.size());
-                                } else {
-                                    const [, other] = evalArgs;
-                                    if (pytorch.Utility.isTensor(other) && Array.isArray(other.size())) {
-                                        output.resize_(other.size());
-                                    }
-                                }
-                                break;
-                            }
-                            case 'aten::select':
-                            case 'aten::select.int': {
-                                const [input] = evalArgs;
-                                if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) {
-                                    output.resize_(Array(input.size().length - 1).fill(NaN));
-                                }
-                                break;
-                            }
-                            case 'aten::layer_norm': {
-                                const [input, normalized_shape] = evalArgs;
-                                if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) {
-                                    const shape = input.size();
-                                    if (Array.isArray(normalized_shape) && normalized_shape.length === 1) {
-                                        const [value] = normalized_shape;
-                                        shape[shape.length - 1] = value;
-                                    }
-                                    output.resize_(shape);
-                                }
-                                break;
-                            }
-                            case 'aten::empty':
-                            case 'aten::ones':
-                            case 'aten::zeros':
-                            case 'aten::zeros_like': {
-                                output.resize_(evalArgs[0]);
-                                break;
-                            }
-                            case 'aten::view':
-                            case 'aten::reshape':
-                            case 'aten::new_full': {
-                                output.resize_(evalArgs[1]);
-                                break;
-                            }
-                            case 'aten::squeeze':
-                            case 'aten::squeeze.dim': {
-                                const [input] = evalArgs;
-                                if (input instanceof torch.Value === false) {
-                                    const size = input.size();
-                                    if (Array.isArray(size)) {
-                                        switch (evalArgs.length) {
-                                            case 1: {
-                                                output.resize_(size.filter((value) => value !== 1));
-                                                break;
-                                            }
-                                            case 2: {
-                                                const [, dim] = evalArgs;
-                                                output.resize_(size.filter((value, index) => (value !== 1 && !isNaN(value)) || index !== dim));
-                                                break;
-                                            }
-                                            default: {
-                                                break;
-                                            }
-                                        }
-                                    }
-                                }
-                                break;
-                            }
-                            case 'aten::unsqueeze': {
-                                const [input, dim] = evalArgs;
-                                if (pytorch.Utility.isTensor(input)) {
-                                    const size = input.size();
-                                    if (Array.isArray(size) && dim !== undefined) {
-                                        const shape = size.slice();
-                                        shape.splice(dim, 0, 1);
-                                        output.resize_(shape);
-                                    } else {
-                                        output.resize_([NaN, NaN, NaN, NaN]);
-                                    }
-                                }
-                                break;
-                            }
-                            case 'aten::transpose':
-                            case 'aten::transpose.int': {
-                                const [input, dim0, dim1] = evalArgs;
-                                if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) {
-                                    const size = input.size().slice();
-                                    const d0 = dim0 >= 0 ? dim0 : size.length + dim0;
-                                    const d1 = dim1 >= 0 ? dim1 : size.length + dim1;
-                                    const value = size[d0];
-                                    /* eslint-disable prefer-destructuring */
-                                    size[d0] = size[d1];
-                                    /* eslint-enable prefer-destructuring */
-                                    size[d1] = value;
-                                    output.resize_(size);
-                                }
-                                break;
-                            }
-                            case 'aten::contiguous': {
-                                const [source] = evalArgs;
-                                output.__source__ = source;
-                                break;
-                            }
-                            case 'quantized::cat':
-                            case 'quantized::cat_relu':
-                            case 'quantized::linear':
-                            case 'quantized::conv2d':
-                            case 'quantized::conv2d.new':
-                            case 'quantized::conv2d_relu':
-                            case 'quantized::conv2d_relu.new':
-                            case 'quantized::add':
-                            case 'quantized::add_relu':
-                                output.resize_([NaN, NaN, NaN, NaN]);
-                                output.__quantized__ = true;
-                                break;
-                            default:
-                                break;
-                        }
-                    }
                     this.variable(output, node);
                     result.push(output);
                     break;
@@ -3294,25 +3108,9 @@ pytorch.jit.Execution = class extends pytorch.Execution {
                         default:
                             break;
                     }
-
                     const value = node.addOutput();
                     value.setType(torch.ListType.get(torch.TensorType.get()));
                     result.push(value);
-
-                    /*
-                    const value = node.addOutput();
-                    const list = this._graph.create('prim::ListUnpack');
-                    list.addInput(value);
-
-                    const tensors = [];
-                    for (let i = 0; i < count; i ++) {
-                        const tensor = this.invoke('torch.Tensor', []);
-                        tensor.__origin__ = schema.name;
-                        this.variable(tensor, list);
-                        tensors.push(tensor);
-                    }
-                    result.push(tensors);
-                    */
                     break;
                 }
                 case '__torch__.torch.classes.quantized.Conv2dPackedParamsBase':
@@ -3322,40 +3120,12 @@ pytorch.jit.Execution = class extends pytorch.Execution {
                 case '__torch__.torch.classes.quantized.LinearPackedParamsBase':
                 case '__torch__.torch.classes.rnn.CellParamsBase':
                 case '__torch__.torch.classes.xnnpack.Conv2dOpContext':
                 case '__torch__.torch.classes.xnnpack.LinearOpContext':
                 case '__torch__.torch.classes.xnnpack.TransposeConv2dOpContext': {
-                    const value = this.invoke(parameter.type, []);
+                    const value = this.invoke(type.qualified_name(), []);
                     this.variable(value, node);
                     result.push(value);
                     break;
                 }
-                case 'Scalar': {
-                    const value = this.variable(null, node);
-                    value.__origin__ = schema.name;
-                    value.setType(torch.NumberType.get());
-                    result.push(value);
-                    break;
-                }
-                case 'boolean': {
-                    const value = this.variable(null, node);
-                    value.__origin__ = schema.name;
-                    value.setType(torch.BoolType.get());
-                    result.push(value);
-                    break;
-                }
-                case 'boolean[]': {
-                    const value = this.variable(null, node);
-                    value.__origin__ = schema.name;
-                    value.setType(torch.ListType.get(torch.BoolType.get()));
-                    result.push(value);
-                    break;
-                }
-                case 'string[]': {
-                    const value = this.variable(null, node);
-                    value.__origin__ = schema.name;
-                    value.setType(torch.ListType.get(torch.StringType.get()));
-                    result.push(value);
-                    break;
-                }
-                case 'int64': {
+                case 'int': {
                     const value = this.variable(null, node);
                     value.__origin__ = schema.name;
                     value.setType(torch.IntType.get());
@@ -3369,7 +3139,7 @@ pytorch.jit.Execution = class extends pytorch.Execution {
                     result.push(value);
                     break;
                 }
-                case 'int64[]': {
+                case 'int[]': {
                     const value = this.variable(null, node);
                     value.__origin__ = schema.name;
                     value.setType(torch.ListType.get(torch.IntType.get()));
@@ -3380,31 +3150,18 @@ pytorch.jit.Execution = class extends pytorch.Execution {
                     result.push(value);
                     break;
                 }
-                case 'float32': {
-                    const value = this.variable(null, node);
-                    value.__origin__ = schema.name;
-                    value.setType(torch.FloatType.get());
-                    result.push(value);
-                    break;
-                }
-                case 'float32[]': {
-                    const value = this.variable(null, node);
-                    value.__origin__ = schema.name;
-                    value.setType(torch.ListType.get(torch.FloatType.get()));
-                    result.push(value);
-                    break;
-                }
-                case 'complex': {
+                case 'Scalar':
+                case 'Dict(str, Tensor)':
+                case 'str':
+                case 'str[]':
+                case 'float':
+                case 'float[]':
+                case 'complex':
+                case 'bool':
+                case 'bool[]': {
                     const value = this.variable(null, node);
                     value.__origin__ = schema.name;
-                    value.setType(torch.ComplexType.get());
-                    result.push(value);
-                    break;
-                }
-                case 'string': {
-                    const value = this.variable(null, node);
-                    value.__origin__ = schema.name;
-                    value.setType(torch.StringType.get());
+                    value.setType(type);
                     result.push(value);
                     break;
                 }
@@ -3415,17 +3172,10 @@ pytorch.jit.Execution = class extends pytorch.Execution {
                     result.push(value);
                     break;
                 }
-                case 'Dict(string, Tensor)': {
-                    const value = this.variable(null, node);
-                    value.__origin__ = schema.name;
-                    value.setType(torch.DictType.get(torch.StringType.get(), torch.TensorType.get()));
-                    result.push(value);
-                    break;
-                }
                 case 't': {
                     const value = this.variable(null, node);
                     value.__origin__ = schema.name;
-                    const t = types.get('t');
+                    const t = varTypes.map(type);
                     if (!t) {
                         throw new pytorch.Error(`Unknown var type 't'.`);
                     }
@@ -3436,7 +3186,7 @@ pytorch.jit.Execution = class extends pytorch.Execution {
                 case 't[]': {
                     const value = this.variable(null, node);
                     value.__origin__ = schema.name;
-                    const t = types.get('t');
+                    const t = varTypes.map(type.getElementType());
                     if (!t) {
                         throw new pytorch.Error();
                     }
@@ -3445,22 +3195,20 @@ pytorch.jit.Execution = class extends pytorch.Execution {
                     break;
                 }
                 default: {
-                    const dict = parameter.type.match(/^Dict\((\w+),\s*(\w+)\)$/);
-                    if (dict) {
+                    if (type instanceof torch.DictType) {
                         const value = this.variable(null, node);
                         value.__origin__ = schema.name;
-                        const keyType = this.toType(dict[1], types);
-                        const valueType = this.toType(dict[2], types);
+                        const keyType = varTypes.map(type.getKeyType());
+                        const valueType = varTypes.map(type.getValueType());
                         value.setType(torch.DictType.get(keyType, valueType));
                         result.push(value);
                         break;
                     }
-                    const tuple = type.match(/^\((\w+),\s*(\w+)\)\[\]$/);
-                    if (tuple && (tuple[2] === 't' || tuple[2] === 'tVal')) {
+                    if (type instanceof torch.TupleType && type.elements().length === 2) {
                         const value = this.variable(null, node);
                         value.__origin__ = schema.name;
-                        const keyType = this.toType(tuple[1], types);
-                        const valueType = this.toType(tuple[2], types);
+                        const keyType = varTypes.map(type.elements()[0]);
+                        const valueType = varTypes.map(type.elements()[1]);
                         value.setType(torch.ListType.get(torch.TupleType.get([keyType, valueType])));
                         result.push(value);
                         break;
@@ -3483,7 +3231,208 @@ pytorch.jit.Execution = class extends pytorch.Execution {
         return result[0];
     }

-    isNativeType(obj, type) {
+    createTensorOutput(op_name, evalArgs, i) {
+        const torch = this.torch;
+        const output = new torch.Tensor();
+        if (i === 0) {
+            switch (op_name) {
+                case 'aten::conv1d':
+                case 'aten::embedding': {
+                    output.resize_([NaN, NaN, NaN]);
+                    break;
+                }
+                case 'aten::cat':
+                case 'aten::conv2d':
+                case 'aten::dropout':
+                case 'aten::flatten':
+                case 'aten::flatten.named_out_dim':
+                case 'aten::max_pool2d':
+                case 'aten::adaptive_avg_pool2d':
+                case 'aten::avg_pool2d':
+                case 'aten::quantize_per_tensor':
+                case 'aten::relu_':
+                case 'aten::prelu':
+                case 'aten::hardtanh_':
+                case 'aten::upsample_bilinear2d':
+                case 'prepacked::conv2d_clamp_run': {
+                    const [input] = evalArgs;
+                    if (pytorch.Utility.isTensor(input) && input.size() === undefined) {
+                        input.resize_([NaN, NaN, NaN, NaN]);
+                    }
+                    output.resize_([NaN, NaN, NaN, NaN]);
+                    break;
+                }
+                case 'aten::slice':
+                case 'aten::slice.Tensor': {
+                    const [input] = evalArgs;
+                    if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) {
+                        const size = input.size();
+                        output.resize_(size);
+                    }
+                    break;
+                }
+                case 'aten::to':
+                case 'aten::to.device':
+                case 'aten::to.dtype':
+                case 'aten::to.dtype_layout': {
+                    const [input] = evalArgs;
+                    if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) {
+                        const size = input.size();
+                        output.resize_(size);
+                    }
+                    break;
+                }
+                case 'aten::conv3d': {
+                    output.resize_([NaN, NaN, NaN, NaN, NaN]);
+                    break;
+                }
+                case 'aten::roll':
+                case 'aten::detach':
+                case 'aten::mean':
+                case 'aten::mul':
+                case 'aten::mul.Scalar':
+                case 'aten::div':
+                case 'aten::div.Scalar':
+                case 'aten::batch_norm':
+                case 'aten::gelu':
+                case 'aten::relu':
+                case 'aten::clamp':
+                case 'aten::clamp_':
+                case 'aten::_add_relu_':
+                case 'aten::hardswish_': {
+                    const [input] = evalArgs;
+                    if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) {
+                        output.resize_(input.size());
+                    }
+                    break;
+                }
+                case 'aten::add':
+                case 'aten::add.Scalar':
+                case 'aten::sub':
+                case 'aten::sub.Scalar': {
+                    const [input] = evalArgs;
+                    if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) {
+                        output.resize_(input.size());
+                    } else {
+                        const [, other] = evalArgs;
+                        if (pytorch.Utility.isTensor(other) && Array.isArray(other.size())) {
+                            output.resize_(other.size());
+                        }
+                    }
+                    break;
+                }
+                case 'aten::select':
+                case 'aten::select.int': {
+                    const [input] = evalArgs;
+                    if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) {
+                        output.resize_(Array(input.size().length - 1).fill(NaN));
+                    }
+                    break;
+                }
+                case 'aten::layer_norm': {
+                    const [input, normalized_shape] = evalArgs;
+                    if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) {
+                        const shape = input.size();
+                        if (Array.isArray(normalized_shape) && normalized_shape.length === 1) {
+                            const [value] = normalized_shape;
+                            shape[shape.length - 1] = value;
+                        }
+                        output.resize_(shape);
+                    }
+                    break;
+                }
+                case 'aten::empty':
+                case 'aten::ones':
+                case 'aten::zeros':
+                case 'aten::zeros_like': {
+                    output.resize_(evalArgs[0]);
+                    break;
+                }
+                case 'aten::view':
+                case 'aten::reshape':
+                case 'aten::new_full': {
+                    output.resize_(evalArgs[1]);
+                    break;
+                }
+                case 'aten::squeeze':
+                case 'aten::squeeze.dim': {
+                    const [input] = evalArgs;
+                    if (input instanceof torch.Value === false) {
+                        const size = input.size();
+                        if (Array.isArray(size)) {
+                            switch (evalArgs.length) {
+                                case 1: {
+                                    output.resize_(size.filter((value) => value !== 1));
+                                    break;
+                                }
+                                case 2: {
+                                    const [, dim] = evalArgs;
+                                    output.resize_(size.filter((value, index) => (value !== 1 && !isNaN(value)) || index !== dim));
+                                    break;
+                                }
+                                default: {
+                                    break;
+                                }
+                            }
+                        }
+                    }
+                    break;
+                }
+                case 'aten::unsqueeze': {
+                    const [input, dim] = evalArgs;
+                    if (pytorch.Utility.isTensor(input)) {
+                        const size = input.size();
+                        if (Array.isArray(size) && dim !== undefined) {
+                            const shape = size.slice();
+                            shape.splice(dim, 0, 1);
+                            output.resize_(shape);
+                        } else {
+                            output.resize_([NaN, NaN, NaN, NaN]);
+                        }
+                    }
+                    break;
+                }
+                case 'aten::transpose':
+                case 'aten::transpose.int': {
+                    const [input, dim0, dim1] = evalArgs;
+                    if (pytorch.Utility.isTensor(input) && Array.isArray(input.size())) {
+                        const size = input.size().slice();
+                        const d0 = dim0 >= 0 ? dim0 : size.length + dim0;
+                        const d1 = dim1 >= 0 ? dim1 : size.length + dim1;
+                        const value = size[d0];
+                        /* eslint-disable prefer-destructuring */
+                        size[d0] = size[d1];
+                        /* eslint-enable prefer-destructuring */
+                        size[d1] = value;
+                        output.resize_(size);
+                    }
+                    break;
+                }
+                case 'aten::contiguous': {
+                    const [source] = evalArgs;
+                    output.__source__ = source;
+                    break;
+                }
+                case 'quantized::cat':
+                case 'quantized::cat_relu':
+                case 'quantized::linear':
+                case 'quantized::conv2d':
+                case 'quantized::conv2d.new':
+                case 'quantized::conv2d_relu':
+                case 'quantized::conv2d_relu.new':
+                case 'quantized::add':
+                case 'quantized::add_relu':
+                    output.resize_([NaN, NaN, NaN, NaN]);
+                    output.__quantized__ = true;
+                    break;
+                default:
+                    break;
+            }
+        }
+        return output;
+    }
+
+    isNativeType(obj, type, N) {
         const torch = this.torch;
         switch (type.str()) {
             case 'Tensor':
@@ -3526,15 +3475,15 @@ pytorch.jit.Execution = class extends pytorch.Execution {
                     return true;
                 }
                 return false;
-            case 'SymInt[1]':
-                return this.isNativeType(obj, torch.IntType.get()) || this.isNativeType(obj, torch.ListType.get(torch.IntType.get()));
             case 'int[]':
-            case 'int[2]':
-            case 'int[3]':
+                if (N === 1 && this.isNativeType(obj, torch.IntType.get())) {
+                    return true;
+                }
                 return (Array.isArray(obj) && obj.every((item) => this.isNativeType(item, torch.IntType.get()) || item === undefined || (item.__class__ === 'number' && isNaN(item))) ||
                     (obj instanceof torch.Value && obj.type() instanceof torch.ListType && obj.type().getElementType() instanceof torch.IntType)) ||
                     (obj instanceof torch.Value && obj.type() instanceof torch.OptionalType && obj.type().getElementType() instanceof torch.ListType && obj.type().getElementType().getElementType() instanceof torch.IntType);
-            case 'int[1]':
+            case 'SymInt[1]':
+                return this.isNativeType(obj, torch.IntType.get()) || this.isNativeType(obj, torch.ListType.get(torch.IntType.get()));
             case 'float':
                 return obj !== null && (typeof obj === 'number' || obj instanceof Number) || (pytorch.Utility.isInstance(obj, 'torch.Value') && pytorch.Utility.isInstance(obj.type(), 'torch.FloatType'));
@@ -3621,7 +3570,7 @@ pytorch.jit.Execution = class extends pytorch.Execution {
         }
     }

-    isType(obj, type) {
+    isType(obj, type) { // remove
         const torch = this.torch;
         switch (type) {
             case 'Tensor':
@@ -3758,6 +3707,55 @@ pytorch.jit.Execution = class extends pytorch.Execution {
         }
     }

+    getNativeType(value) { // rename
+        const torch = this.torch;
+        if (value === null || value === undefined) {
+            return undefined;
+        } else if (value === true || value === false) {
+            return torch.BoolType.get();
+        } else if (pytorch.Utility.isTensor(value)) {
+            return torch.TensorType.get();
+        } else if (typeof value === 'string') {
+            return torch.StringType.get();
+        } else if (Number(value) === value && value % 1 === 0) {
+            return torch.IntType.get();
+        } else if (Number(value) === value) {
+            return torch.FloatType.get();
+        } else if (Array.isArray(value) && value.every((item) => Number(item) === item && item % 1 === 0)) {
+            return torch.ListType.get(torch.IntType.get());
+        } else if (Array.isArray(value) && value.every((item) => Number(item) === item)) {
+            return torch.ListType.get(torch.FloatType.get());
+        } else if (value instanceof torch.Value) {
+            return value.type();
+        }
+        const text = (JSON.stringify(value) || '(undefined)').substring(0, 10);
+        throw new pytorch.Error(`Unsupported ops argument type '${text}'.`);
+    }
+
+    getType(value) { // remove
+        if (value === null || value === undefined) {
+            return undefined;
+        } else if (value === true || value === false) {
+            return 'boolean';
+        } else if (pytorch.Utility.isTensor(value)) {
+            return 'Tensor';
+        } else if (typeof value === 'string') {
+            return 'string';
+        } else if (Number(value) === value && value % 1 === 0) {
+            return 'int64';
+        } else if (Number(value) === value) {
+            return 'float32';
+        } else if (Array.isArray(value) && value.every((item) => Number(item) === item && item % 1 === 0)) {
+            return 'int64[]';
+        } else if (Array.isArray(value) && value.every((item) => Number(item) === item)) {
+            return 'float32[]';
+        } else if (pytorch.Utility.isInstance(value, 'torch.Value')) {
+            return pytorch.Utility.toType(value.type());
+        }
+        const text = (JSON.stringify(value) || '(undefined)').substring(0, 10);
+        throw new pytorch.Error(`Unsupported ops argument type '${text}'.`);
+    }
+
     _overload(target, name, args, context) {
         const moduleName = pytorch.Utility.target(target);
         if (!moduleName) {
@@ -3782,185 +3780,40 @@ pytorch.jit.Execution = class extends pytorch.Execution {
         } else if (type === 'complex') {
             op_name = 'aten::Complex';
         }
-        this.native = false;
-        if (this.native && op_name) {
-            const overloads = torch._C._jit_get_schemas_for_operator(op_name);
-            /*
-            if (!overloads && type.startsWith('ops.') && !type.startsWith('ops.prim')) {
-                const module = this.import(moduleName);
-                if (!module || !module[name]) {
-                    const metadata = {};
-                    metadata.name = type;
-                    metadata.inputs = [];
-                    metadata.outputs = [];
-                    for (let i = 0; i < args.length; i++) {
-                        const input = {};
-                        let argument = args[i];
-                        input.name = i.toString();
-                        if (argument.type === '=' && argument.target && argument.target.type === 'id') {
-                            input.name = this.expression(argument.target, context);
-                            argument = argument.expression;
-                        }
-                        const obj = this.expression(argument, context);
-                        input.type = pytorch.Utility.getType(obj);
-                        metadata.inputs.push(input);
-                    }
-                    const count = context.target.length > 0 ? context.target[context.target.length - 1].length : 0;
-                    for (let i = 0; i < count; i++) {
-                        metadata.outputs.push({ name: '', type: '' });
-                    }
-                    this._metadata.add(type, metadata);
-                    overloads = [metadata];
-                }
-            }
-            */
-            if (!overloads) {
-                if (type.startsWith('aten::') || type.startsWith('prim::')) {
-                    throw new pytorch.Error(`Unknown function '${type}'.`);
-                }
-                return null;
-            }
-            const evalArgs = args.map((argument) => {
-                if (argument.type === '=' && argument.target && argument.target.type === 'id') {
-                    argument = argument.expression;
-                }
-                return this.expression(argument, context);
-            });
-            const matches = [];
-            for (const schema of overloads) {
-                const parameters = schema.arguments || [];
-                let next = false;
-                let kwarg_only = false;
-                let position = 0;
-                let index = 0;
-                while (position < evalArgs.length) {
-                    if (index >= parameters.length) {
-                        next = !schema.name.startsWith('_caffe2::') && !schema.is_vararg;
-                        break;
-                    }
-                    const arg = parameters[index];
-                    if (arg.kwarg_only) {
-                        break;
-                    }
-                    index++;
-                    const value = evalArgs[position];
-                    let type = arg.real_type;
-                    let optional = false;
-                    if (type instanceof torch.OptionalType) {
-                        type = type.getElementType();
-                        optional = true;
-                    }
-                    if (optional === true &&
-                        (type instanceof torch.FloatType || type instanceof torch.BoolType || type instanceof torch.IntType || type instanceof torch.ComplexType || type.kind() === 'ScalarTypeType' || type instanceof torch.DeviceObjType || type.kind() === 'LayoutKind') &&
-                        value instanceof torch.Value && value.type() instanceof torch.NoneType) {
-                        position++;
-                    } else if (!this.isNativeType(value, type) && value !== null) {
-                        if (optional) {
-                            continue;
-                        }
-                        next = true;
-                        break;
-                    } else if (args[position].type === '=') {
-                        next = true;
-                        break;
-                    } else {
-                        position++;
-                    }
-                }
-                if (next) {
-                    continue;
-                }
-                if (args.every((arg, index) => index < position || (arg.type === '=' && arg.target && arg.target.type === 'id'))) {
-                    const params = new Map(parameters.slice(index).map((a) => [a.name, a]));
-                    while (position < args.length) {
-                        const value = evalArgs[position];
-                        const arg = params.get(args[position].target.value);
-                        position++;
-                        if (!arg) {
-                            next = true;
-                            break;
-                        }
-                        if (arg.kwarg_only) {
-                            kwarg_only = true;
-                        }
-                        let type = arg.real_type;
-                        let optional = false;
-                        if (type instanceof torch.OptionalType) {
-                            type = type.getElementType();
-                            optional = true;
-                        }
-                        if (!this.isNativeType(value, type)) {
-                            if (optional) {
-                                continue;
-                            }
-                            next = true;
-                            break;
-                        }
-                    }
-                }
-                if (next) {
-                    continue;
-                }
-                if (position < evalArgs.length && !schema.is_vararg && !schema.name.startsWith('_caffe2::')) {
-                    continue;
-                }
-                if (!kwarg_only && parameters.slice(index).some((arg) => !arg.has_default_value())) {
-                    continue;
-                }
-                matches.push(schema);
-            }
-            if (matches.length > 1) {
-                const keys = new Map([['IntType', 1], ['FloatType', 2], ['TensorType', 3], ['NumberType', 4]]);
-                matches.sort((a, b) => {
-                    let keyA = keys.get(a.arguments[0].real_type.kind()) || 5;
-                    let keyB = keys.get(b.arguments[0].real_type.kind()) || 5;
-                    if (keyA === keyB && a.arguments.length > 1 && b.arguments.length > 1) {
-                        keyA = keys.get(a.arguments[1].real_type.kind()) || 5;
-                        keyB = keys.get(b.arguments[1].real_type.kind()) || 5;
-                    }
-                    return keyA - keyB;
-                });
-            }
-            if (matches.length === 0) {
-                throw new pytorch.Error(`Unknown function '${op_name}'.`);
-            }
-            // return [matches[0], evalArgs];
-        }
-        let overloads = this._types.get(op_name);
-        if (!overloads && type.startsWith('ops.') && !type.startsWith('ops.prim')) {
+        let overloads = null;
+        let evalArgs = null;
+        overloads = torch._C._jit_get_schemas_for_operator(op_name);
+        if ((!overloads || overloads.length === 0) && type.startsWith('ops.') && !type.startsWith('ops.prim')) {
             const module = this.import(moduleName);
             if (!module || !module[name]) {
-                const metadata = {};
-                metadata.name = type;
-                metadata.inputs = [];
-                metadata.outputs = [];
+                const schema = new torch.FunctionSchema(op_name, null, [], [], false, false);
                 for (let i = 0; i < args.length; i++) {
-                    const input = {};
                     let argument = args[i];
-                    input.name = i.toString();
+                    let name = i.toString();
                     if (argument.type === '=' && argument.target && argument.target.type === 'id') {
-                        input.name = this.expression(argument.target, context);
+                        name = this.expression(argument.target, context);
                         argument = argument.expression;
                     }
                     const obj = this.expression(argument, context);
-                    input.type = pytorch.Utility.getType(obj);
-                    metadata.inputs.push(input);
+                    const real_type = this.getNativeType(obj);
+                    schema.arguments.push(new torch.Argument(name, null, real_type, null, null, false, null));
                 }
                 const count = context.target.length > 0 ? context.target[context.target.length - 1].length : 0;
                 for (let i = 0; i < count; i++) {
-                    metadata.outputs.push({ name: '', type: '' });
+                    schema.returns.push(new torch.Argument('', null, null, null, null, false, null));
                 }
-                this._metadata.add(type, metadata);
-                overloads = [metadata];
+                const op = new torch._C.Operator(schema);
+                torch._C._get_registry().registerOperator(op);
+                overloads = [schema];
             }
         }
-        if (!overloads) {
+        if (!overloads || overloads.length === 0) {
             if (type.startsWith('aten::') || type.startsWith('prim::')) {
                 throw new pytorch.Error(`Unknown function '${type}'.`);
             }
             return null;
         }
-        const evalArgs = args.map((argument) => {
+        evalArgs = args.map((argument) => {
            if (argument.type === '=' && argument.target && argument.target.type === 'id') {
                argument = argument.expression;
            }
@@ -3968,7 +3821,7 @@ pytorch.jit.Execution = class extends pytorch.Execution {
             return this.expression(argument, context);
         });
         const matches = [];
         for (const schema of overloads) {
-            const parameters = schema.inputs || [];
+            const parameters = schema.arguments || [];
             let next = false;
             let kwarg_only = false;
             let position = 0;
@@ -3983,18 +3836,18 @@ pytorch.jit.Execution = class extends pytorch.Execution {
                     break;
                 }
                 index++;
-                const value = evalArgs[position];
-                let type = arg.type;
+                const v = evalArgs[position];
+                let type = arg.real_type;
                 let optional = false;
-                if (type.endsWith('?')) {
-                    type = arg.type.substring(0, arg.type.length - 1);
+                if (type instanceof torch.OptionalType) {
+                    type = type.getElementType();
                     optional = true;
                 }
                 if (optional === true &&
-                    (type === 'float32' || type === 'boolean' || type === 'int64' || type === 'complex' || type === 'ScalarType' || type === 'Device' || type === 'Layout') &&
-                    value instanceof torch.Value && value.type() instanceof torch.NoneType) {
+                    (type instanceof torch.FloatType || type instanceof torch.BoolType || type instanceof torch.IntType || type instanceof torch.ComplexType || type.kind() === 'ScalarTypeType' || type instanceof torch.DeviceObjType || type.kind() === 'LayoutKind') &&
+                    v instanceof torch.Value && v.type() instanceof torch.NoneType) {
                     position++;
-                } else if (!this.isType(value, type) && value !== null) {
+                } else if (!this.isNativeType(v, type, arg.N) && v !== null) {
                     if (optional) {
                         continue;
                     }
@@ -4023,13 +3876,13 @@ pytorch.jit.Execution = class extends pytorch.Execution {
                     if (arg.kwarg_only) {
                         kwarg_only = true;
                     }
-                    let type = arg.type;
+                    let type = arg.real_type;
                     let optional = false;
-                    if (type.endsWith('?')) {
-                        type = arg.type.substring(0, arg.type.length - 1);
+                    if (type instanceof torch.OptionalType) {
+                        type = type.getElementType();
                         optional = true;
                     }
-                    if (!this.isType(value, type)) {
+                    if (!this.isNativeType(value, type, arg.N)) {
                         if (optional) {
                             continue;
                         }
@@ -4044,25 +3897,25 @@ pytorch.jit.Execution = class extends pytorch.Execution {
             if (position < evalArgs.length && !schema.is_vararg && !schema.name.startsWith('_caffe2::')) {
                 continue;
             }
-            if (!kwarg_only && parameters.slice(index).some((parameter) => parameter.default === undefined)) {
+            if (!kwarg_only && parameters.slice(index).some((arg) => !arg.has_default_value())) {
                 continue;
             }
             matches.push(schema);
         }
         if (matches.length > 1) {
-            const keys = new Map([['int64', 1], ['float32', 2], ['Tensor', 3], ['Scalar', 4]]);
+            const keys = new Map([['IntType', 1], ['FloatType', 2], ['TensorType', 3], ['NumberType', 4]]);
             matches.sort((a, b) => {
-                let keyA = keys.get(a.inputs[0].type) || 4;
-                let keyB = keys.get(b.inputs[0].type) || 4;
-                if (keyA === keyB && a.inputs.length > 1 && b.inputs.length > 1) {
-                    keyA = keys.get(a.inputs[1].type) || 4;
-                    keyB = keys.get(b.inputs[1].type) || 4;
+                let keyA = keys.get(a.arguments[0].real_type.kind()) || 5;
+                let keyB = keys.get(b.arguments[0].real_type.kind()) || 5;
+                if (keyA === keyB && a.arguments.length > 1 && b.arguments.length > 1) {
+                    keyA = keys.get(a.arguments[1].real_type.kind()) || 5;
+                    keyB = keys.get(b.arguments[1].real_type.kind()) || 5;
                 }
                 return keyA - keyB;
             });
         }
         if (matches.length === 0) {
-            throw new pytorch.Error(`Unknown function '${type}'.`);
+            throw new pytorch.Error(`Unknown function '${op_name}'.`);
         }
         return [matches[0], evalArgs];
     }
@@ -4894,30 +4747,6 @@ pytorch.Utility = class {
         return pytorch.Utility.isObjectType(type);
     }

-    static getType(value) {
-        if (value === null || value === undefined) {
-            return undefined;
-        } else if (value === true || value === false) {
-            return 'boolean';
-        } else if (pytorch.Utility.isTensor(value)) {
-            return 'Tensor';
-        } else if (typeof value === 'string') {
-            return 'string';
-        } else if (Number(value) === value && value % 1 === 0) {
-            return 'int64';
-        } else if (Number(value) === value) {
-            return 'float32';
-        } else if (Array.isArray(value) && value.every((item) => Number(item) === item && item % 1 === 0)) {
-            return 'int64[]';
-        } else if (Array.isArray(value) && value.every((item) => Number(item) === item)) {
-            return 'float32[]';
-        } else if (pytorch.Utility.isInstance(value, 'torch.Value')) {
-            return pytorch.Utility.toType(value.type());
-        }
-        const text = (JSON.stringify(value) || '(undefined)').substring(0, 10);
-        throw new pytorch.Error(`Unsupported ops argument type '${text}'.`);
-    }
-
     static isSubclass(value, name) {
         if (value && value.__module__ && value.__name__) {
             return name === `${value.__module__}.${value.__name__}`;
@@ -5573,4 +5402,6 @@ pytorch.Error = class extends Error {
     }
 };

+export const Metadata = pytorch.Metadata;
+export const Execution = pytorch.Execution;
 export const ModelFactory = pytorch.ModelFactory;
diff --git a/source/tf.js b/source/tf.js
index 692f5bd816..6d555107f0 100644
--- a/source/tf.js
+++ b/source/tf.js
@@ -373,22 +373,37 @@ tf.ModelFactory = class {
             producer = 'PyTorch';
             const openPyTorchMetadata = async (context, saved_model) => {
                 try {
+                    const pytorch = await context.require('./pytorch');
+                    const metadata_ = await pytorch.Metadata.open(context);
+                    const execution = new pytorch.Execution();
+                    execution.registerMetadata(metadata_);
+
+                    /*
                     const data = await context.request('pytorch-metadata.json');
                     const metadata = new Map();
                     for (const item of JSON.parse(data)) {
-                        const name = item.name;
+                        let name = item.name;
                         if (name.indexOf('::') !== -1) {
-                            const index = name.indexOf('.');
-                            const key = index === -1 ? name : name.substring(0, index);
-                            if (!metadata.has(key)) {
-                                metadata.set(key, []);
+                            const brace = name.indexOf('(');
+                            name = brace === -1 ? name : name.substring(0, brace);
+                            const dot = name.indexOf('.');
+                            name = dot === -1 ? name : name.substring(0, dot);
+                            if (!metadata.has(name)) {
+                                metadata.set(name, []);
                             }
-                            metadata.get(key).push(item);
+                            metadata.get(name).push(item);
                         }
                     }
+                    */
+                    const torch = execution.register('torch');
                     for (const graph of saved_model.meta_graphs) {
                         for (const node of graph.graph_def.node) {
-                            node.__metadata__ = Array.from(metadata.get(node.op) || []);
+                            const schemas = torch._C._jit_get_schemas_for_operator(node.op);
+                            if (Array.isArray(schemas) && schemas.length > 0) {
+                                node.__metadata__ = schemas;
+                                node.__torch__ = torch;
+                            }
+                            // node.__metadata__ = Array.from(metadata.get(node.op) || []);
                         }
                     }
                 } catch {
@@ -1949,8 +1964,9 @@ tf.Context = class {
             }
         }
         if (node.__metadata__) {
+            const torch = node.__torch__;
             const match = (node, schema) => {
-                const args = schema.inputs || [];
+                const args = schema.arguments || [];
                 const inputs = node.input || [];
                 if (inputs.length > args.length) {
                     return false;
@@ -1958,14 +1974,16 @@ tf.Context = class {
                 for (let i = 0; i < inputs.length; i++) {
                     const input = inputs[i];
                     const arg = args[i];
-                    switch (arg.type) {
+                    let type = arg.real_type;
+                    type = type instanceof torch.OptionalType ? type.getElementType() : type;
+                    switch (type.str()) {
                         case 'Tensor': {
                             if ((input.constant === undefined && input.list === undefined) || input.constant === null) {
                                 continue;
                             }
                             break;
                         }
-                        case 'int64':
+                        case 'int':
                         case 'SymInt': {
                             if (input.constant !== undefined &&
                                 Number.isInteger(parseInt(input.constant, 10))) {
@@ -1973,14 +1991,14 @@ tf.Context = class {
                             }
                             break;
                         }
-                        case 'float32': {
+                        case 'float': {
                             if (input.constant !== undefined && !isNaN(parseFloat(input.constant))) {
                                 continue;
                             }
                             break;
                         }
-                        case 'int64[]':
-                        case 'int64[2]':
+                        case 'int[]':
+                        case 'int[2]':
                         case 'SymInt[]':
                         case 'SymInt[2]': {
                             if (Array.isArray(input.list)) {
@@ -1991,7 +2009,7 @@ tf.Context = class {
                             }
                             break;
                         }
-                        case 'boolean': {
+                        case 'bool': {
                             if (input.constant === 'false' ||
                                 input.constant === 'true' ||
                                 input.constant === '0' ||
@@ -2017,18 +2035,20 @@ tf.Context = class {
             };
             const schema = node.__metadata__.find((schema) => match(node, schema));
             if (schema) {
-                const args = schema.inputs || [];
+                const args = schema.arguments;
                 const inputs = node.input || [];
                 for (let i = 0; i < inputs.length; i++) {
                     const input = inputs[i];
                     delete input.metadata;
                     const arg = args[i];
-                    switch (arg.type) {
+                    let type = arg.real_type;
+                    type = type instanceof torch.OptionalType ? type.getElementType() : type;
+                    switch (type.str()) {
                         case 'Tensor': {
                             input.metadata = arg;
                             break;
                         }
-                        case 'int64':
+                        case 'int':
                         case 'SymInt': {
                             const value = parseInt(input.constant, 10);
                             input.attr = new tf.proto.tensorflow.AttrValue();
@@ -2036,15 +2056,15 @@ tf.Context = class {
                             input.attr.metadata = arg;
                             break;
                         }
-                        case 'float32': {
+                        case 'float': {
                             const value = parseFloat(input.constant, 10);
                             input.attr = new tf.proto.tensorflow.AttrValue();
                             input.attr.f = value;
                             input.attr.metadata = arg;
                             break;
                         }
-                        case 'int64[]':
-                        case 'int64[2]':
+                        case 'int[]':
+                        case 'int[2]':
                         case 'SymInt[]':
                         case 'SymInt[2]': {
                             const list = input.list.map((item) => parseInt(item, 10));
@@ -2054,7 +2074,7 @@ tf.Context = class {
                             input.attr.metadata = arg;
                             break;
                         }
-                        case 'boolean': {
+                        case 'bool': {
                             input.attr = new tf.proto.tensorflow.AttrValue();
                             input.attr.b = input.constant === 'true' || input.constant === '1';
                             input.attr.metadata = arg;
diff --git a/test/models.json b/test/models.json
index ce6c1da9eb..912142e303 100644
--- a/test/models.json
+++ b/test/models.json
@@ -5844,7 +5844,7 @@
         "type": "pytorch",
         "target": "pyg_model.pt",
         "source": "https://github.com/lutzroeder/netron/files/10369483/pyg_model.zip[pyg_model.pt]",
-        "error": "Unknown function 'torch.linear'.",
+        "error": "Unknown function 'aten::linear'.",
         "link": "https://github.com/lutzroeder/netron/issues/546"
     },
     {
diff --git a/tools/pytorch_script.py b/tools/pytorch_script.py
index 2e3647cbd7..9770c78a5f 100644
--- a/tools/pytorch_script.py
+++ b/tools/pytorch_script.py
@@ -16,365 +16,6 @@
 metadata_file = os.path.join(source_dir, 'pytorch-metadata.json')
 pytorch_source_dir = os.path.join(third_party_dir, 'source', 'pytorch')

-class Metadata: # pylint: disable=too-few-public-methods,missing-class-docstring
-
-    def __init__(self, metadata):
-        self.types = metadata
-        self.cache = set()
-        self._primitives = {
-            'int': 'int64', 'float': 'float32', 'bool': 'boolean', 'str': 'string'
-        }
-
-    def type(self, schema): # pylint: disable=missing-function-docstring
-        key = schema.name if isinstance(schema, Schema) else schema.split('(', 1)[0].strip()
-        if key not in self.cache:
-            self.cache.add(key)
-            schema = schema if isinstance(schema, Schema) else Schema(schema)
-            arguments = list(filter(lambda _: \
-                not(_.kwarg_only and hasattr(_, 'alias')), schema.arguments))
-            returns = schema.returns
-            value = self.types.setdefault(schema.name, { 'name': schema.name, })
-            inputs = value.get('inputs', [])
-            outputs = value.get('outputs', [])
-            inputs = [ inputs[i] if i < len(inputs) else {} for i in range(len(arguments)) ]
-            outputs = [ outputs[i] if i < len(outputs) else {} for i in range(len(returns)) ]
-            value['inputs'] = inputs
-            value['outputs'] = outputs
-            for i, _ in enumerate(arguments):
-                argument = inputs[i]
-                argument['name'] = _.name
-                self._argument(argument, getattr(_, 'type'))
-                if hasattr(_, 'default'):
-                    argument['default'] = _.default
-                if hasattr(_, 'kwarg_only') and _.kwarg_only is True:
-                    argument['kwarg_only'] = True
-            for i, _ in enumerate(returns):
-                argument = outputs[i]
-                if hasattr(_, 'name'):
-                    argument['name'] = _.name
-                self._argument(argument, getattr(_, 'type'))
-        return self.types[key]
-
-    def _argument_type(self, value):
-        if isinstance(value, Schema.OptionalType):
-            element_type = self._argument_type(value.element_type)
-            return f'{element_type}?'
-        if isinstance(value, Schema.ListType):
-            element_type = self._argument_type(value.element_type)
-            size = str(value.size) if hasattr(value, 'size') else ''
-            return f'{element_type}[{size}]'
-        if isinstance(value, Schema.DictType):
-            key_type = self._argument_type(value.getKeyType())
-            value_type = self._argument_type(value.getValueType())
-            return f'Dict({key_type}, {value_type})'
-        if isinstance(value, Schema.TupleType):
-            elements = []
-            for element in value.elements():
-                elements.append(self._argument_type(element))
-            return f'({', '.join(elements)})'
-        name = value.name
-        return self._primitives[name] if name in self._primitives else name
-
-    def _argument(self, argument, value):
-        argument_type = self._argument_type(value)
-        if argument_type:
-            argument['type'] = argument_type
-        else:
-            argument.pop('type', None)
-        if 'optional' in argument:
-            del argument['optional']
-
-class Schema: # pylint: disable=too-few-public-methods,missing-class-docstring
-    def __init__(self, value):
-        self.value = value
-        lexer = Schema.Lexer(value)
-        lexer.whitespace(0)
-        self._parse_name(lexer)
-        lexer.whitespace(0)
-        if lexer.kind == '(':
-            self._parse_arguments(lexer)
-            lexer.whitespace(0)
-            lexer.expect('->')
-            lexer.whitespace(0)
-            self._parse_returns(lexer)
-    def __str__(self):
-        arguments = []
-        kwarg_only = False
-        for _ in self.arguments:
-            if not kwarg_only and _.kwarg_only:
-                kwarg_only = True
-                arguments.append('*')
-            arguments.append(_.__str__())
-        if self.is_vararg:
-            arguments.append('...')
-        returns = ', '.join(map(lambda _: _.__str__(), self.returns))
-        returns = returns if len(self.returns) == 1 else '(' + returns + ')'
-        return self.name + '(' + ', '.join(arguments) + ') -> ' + returns
-    def _parse_name(self, lexer):
-        self.name = lexer.expect('id')
-        if lexer.eat(':'):
-            lexer.expect(':')
-            self.name = self.name + '::' + lexer.expect('id')
-        if lexer.eat('.'):
-            self.name = self.name + '.' + lexer.expect('id')
-    def _parse_arguments(self, lexer):
-        self.arguments = []
-        self.is_vararg = False
-        self.kwarg_only = False
-        lexer.expect('(')
-        if not lexer.eat(')'):
-            while True:
-                lexer.whitespace(0)
-                if self.is_vararg:
-                    raise NotImplementedError()
-                if lexer.eat('*'):
-                    self.kwarg_only = True
-                elif lexer.eat('...'):
-                    self.is_vararg = True
-                else:
-                    self.arguments.append(Schema.Argument(lexer, False, self.kwarg_only))
-                lexer.whitespace(0)
-                if not lexer.eat(','):
-                    break
-            lexer.expect(')')
-    def _parse_returns(self, lexer):
-        self.returns = []
-        self.is_varret = False
-        if lexer.eat('...'):
-            self.is_varret = True
-        elif lexer.eat('('):
-            lexer.whitespace(0)
-            if not lexer.eat(')'):
-                while True:
-                    lexer.whitespace(0)
-                    if self.is_varret:
-                        raise NotImplementedError()
-                    if lexer.eat('...'):
-                        self.is_varret = True
-                    else:
-                        self.returns.append(Schema.Argument(lexer, True, False))
-                    lexer.whitespace(0)
-                    if not lexer.eat(','):
-                        break
-                lexer.expect(')')
-            lexer.whitespace(0)
-        else:
-            self.returns.append(Schema.Argument(lexer, True, False))
-    class Argument: # pylint: disable=too-few-public-methods
-        def __init__(self, lexer, is_return, kwarg_only):
-            value = Schema.Type.parse(lexer)
-            lexer.whitespace(0)
-            while True:
-                if lexer.eat('['):
-                    size = None
-                    if lexer.kind == '#':
-                        size = int(lexer.value)
-                        lexer.next()
-                    lexer.expect(']')
-                    value = Schema.ListType(value, size)
-                elif lexer.eat('?'):
-                    value = Schema.OptionalType(value)
-                elif lexer.kind == '(' and not hasattr(self, 'alias'):
-                    self.alias = self._parse_alias(lexer)
-                else:
-                    break
-            self.type = value
-            if is_return:
-                lexer.whitespace(0)
-                self.kwarg_only = False
-                if lexer.kind == 'id':
-                    self.name = lexer.expect('id')
-            else:
-                lexer.whitespace(1)
-                self.kwarg_only = kwarg_only
-                self.name = lexer.expect('id')
-                lexer.whitespace(0)
-                if lexer.eat('='):
-                    lexer.whitespace(0)
-                    self.default = self._parse_value(lexer)
-        def __str__(self):
-            alias = '(' + self.alias + ')' if hasattr(self, 'alias') else ''
-            name = ' ' + self.name if hasattr(self, 'name') else ''
-            default = '=' + self.default.__str__() if hasattr(self, 'default') else ''
-            return self.type.__str__() + alias + name + default
-        def _parse_value(self, lexer):
-            if lexer.kind == 'id':
-                if lexer.value in ('True', 'False'):
-                    value = bool(lexer.value == 'True')
-                elif lexer.value == 'None':
-                    value = None
-                elif lexer.value in ('Mean', 'contiguous_format', 'long'):
-                    value = lexer.value
-                else:
-                    raise NotImplementedError()
-            elif lexer.kind == '#':
-                value = float(lexer.value) if \
-                    lexer.value.find('.') != -1 or lexer.value.find('e') != -1 else \
-                    int(lexer.value)
-            elif lexer.kind == 'string':
-                value = lexer.value[1:-1]
-            elif lexer.eat('['):
-                value = []
-                if not lexer.eat(']'):
-                    while True:
-                        lexer.whitespace(0)
-                        value.append(self._parse_value(lexer))
-                        lexer.whitespace(0)
-                        if not lexer.eat(','):
-                            break
-                    lexer.expect(']')
-                return value
-            else:
-                raise NotImplementedError()
-            lexer.next()
-            return value
-        def _parse_alias(self, lexer):
-            value = ''
-            lexer.expect('(')
-            while not lexer.eat(')'):
-                value += lexer.value
-                lexer.next()
-            return value
-    class Type: # pylint: disable=too-few-public-methods,missing-class-docstring
-        def __init__(self, name):
-            self.name = name
-        def __str__(self):
-            return self.name
-        @staticmethod
-        def parse(lexer): # pylint: disable=missing-function-docstring
-            if lexer.eat('('):
-                lexer.whitespace(0)
-                elements = []
-                while not lexer.eat(')'):
-                    elements.append(Schema.Type.parse(lexer))
-                    lexer.whitespace(0)
-                    lexer.eat(',')
-                    lexer.whitespace(0)
-                return Schema.TupleType(elements)
-            name = lexer.expect('id')
-            while lexer.eat('.'):
-                name = name + '.' + lexer.expect('id')
-            if name == 'Dict':
-                lexer.expect('(')
-                lexer.whitespace(0)
-                key_type = Schema.Type.parse(lexer)
-                lexer.whitespace(0)
-                lexer.expect(',')
-                lexer.whitespace(0)
-                value_type = Schema.Type.parse(lexer)
-                lexer.whitespace(0)
-                lexer.expect(')')
-                return Schema.DictType(key_type, value_type)
-            if name == 'Future':
-                lexer.expect('(')
-                lexer.whitespace(0)
-                elem_type = Schema.Type.parse(lexer)
-                lexer.whitespace(0)
-                lexer.expect(')')
-                return Schema.Type(f'Future({elem_type})')
-            return Schema.Type(name)
-    class OptionalType: # pylint: disable=too-few-public-methods,missing-class-docstring
-        def __init__(self, element_type):
-            self.element_type = element_type
-        def __str__(self):
-            return self.element_type.__str__() + '?'
-    class ListType: # pylint: disable=too-few-public-methods,missing-class-docstring
-        def __init__(self, element_type, size):
-            self.element_type = element_type
-            if size:
-                self.size = size
-        def __str__(self):
-            size = self.size.__str__() if hasattr(self, 'size') else ''
-            return self.element_type.__str__() + '[' + size + ']'
-    class DictType:
-        def __init__(self, key_type, value_type):
-            self._key_type = key_type
-            self._value_type = value_type
-        def __str__(self):
-            return 'Dict(' + str(self._key_type) + ', ' + str(self._value_type) + ')'
-        def getKeyType(self): # pylint: disable=invalid-name,missing-function-docstring
-            return self._key_type
-        def getValueType(self): # pylint: disable=invalid-name,,missing-function-docstring
-            return self._value_type
-    class TupleType:
-        def __init__(self, elements):
-            self._elements = elements
-        def elements(self): # pylint: disable=invalid-name,,missing-function-docstring
-            return self._elements
-    class Lexer: # pylint: disable=too-few-public-methods,missing-class-docstring
-        def __init__(self, buffer):
-            self.buffer = buffer
-            self.position = 0
-            self.value = ''
-            self.next()
-        def eat(self, kind): # pylint: disable=missing-function-docstring
-            if self.kind != kind:
-                return None
-            value = self.value
-            self.next()
-            return value
-        def expect(self, kind): # pylint: disable=missing-function-docstring
-            if self.kind != kind:
-                raise SyntaxError("Unexpected '" + self.kind + "' instead of '" + kind + "'.")
-            value = self.value
-            self.next()
-            return value
-        def whitespace(self, count): # pylint: disable=missing-function-docstring
-            if self.kind != ' ':
-                if count > len(self.value):
-                    raise IndexError()
-                return False
-            self.next()
-            return True
-        def next(self): # pylint: disable=missing-function-docstring,too-many-branches
-            self.position += len(self.value)
-            i = self.position
-            if i >= len(self.buffer):
-                self.kind = '\0'
-                self.value = ''
-            elif self.buffer[i] == ' ':
-                while self.buffer[i] == ' ':
-                    i += 1
-                self.kind = ' '
-                self.value = self.buffer[self.position:i]
-            elif self.buffer[i] == '.' and self.buffer[i+1] == '.' and self.buffer[i+2] == '.':
-                self.kind = '...'
-                self.value = '...'
-            elif self.buffer[i] in ('(', ')', ':', '.', '[', ']', ',', '=', '?', '!', '*', '|'):
-                self.kind = self.buffer[i]
-                self.value = self.buffer[i]
-            elif (self.buffer[i] >= 'a' and self.buffer[i] <= 'z') or \
-                (self.buffer[i] >= 'A' and self.buffer[i] <= 'Z') or self.buffer[i] == '_':
-                i += 1
-                while i < len(self.buffer) and \
-                    ((self.buffer[i] >= 'a' and self.buffer[i] <= 'z') or \
-                    (self.buffer[i] >= 'A' and self.buffer[i] <= 'Z') or \
-                    (self.buffer[i] >= '0' and self.buffer[i] <= '9') or self.buffer[i] == '_'):
-                    i += 1
-                self.kind = 'id'
-                self.value = self.buffer[self.position:i]
-            elif self.buffer[i] == '-' and self.buffer[i+1] == '>':
-                self.kind = '->'
-                self.value = '->'
-            elif (self.buffer[i] >= '0' and self.buffer[i] <= '9') or self.buffer[i] == '-':
-                i += 1
-                while i < len(self.buffer) and \
-                    ((self.buffer[i] >= '0' and self.buffer[i] <= '9') or \
-                    self.buffer[i] == '.' or self.buffer[i] == 'e' or self.buffer[i] == '-'):
-                    i += 1
-                self.kind = '#'
-                self.value = self.buffer[self.position:i]
-            elif self.buffer[i] in ("'", '"'):
-                quote = self.buffer[i]
-                i += 1
-                while i < len(self.buffer) and self.buffer[i] != quote:
-                    i += 2 if self.buffer[i] == '\\' and self.buffer[i+1] in ("'", '"', '\\') else 1
-                i += 1
-                self.kind = 'string'
-                self.value = self.buffer[self.position:i]
-            else:
-                raise NotImplementedError("Unsupported token at " + self.position)
-
 def _read(path):
     with open(path, 'r', encoding='utf-8') as file:
         return file.read()
@@ -425,540 +66,6 @@ def _write_metadata(value):
 ]
 # pylint: disable=line-too-long

-known_schema_definitions = [
-    'aten::__and__.bool(bool a, bool b) -> bool',
-    'aten::__and__.int(int a, int b) -> int',
-    'aten::__and__.Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::__and__.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::__contains__.Tensor(Dict(Tensor, t) dict, Tensor key) -> bool',
-    'aten::__contains__.bool(Dict(bool, t) dict, bool key) -> bool',
-    'aten::__contains__.complex(Dict(complex, t) dict, complex key) -> bool',
-    'aten::__contains__.float(Dict(float, t) dict, float key) -> bool',
-    'aten::__contains__.float_list(float[] l, float item) -> bool',
-    'aten::__contains__.int(Dict(int, t) dict, int key) -> bool',
-    'aten::__contains__.int_list(int[] l, int item) -> bool',
-    'aten::__contains__.str(Dict(str, t) dict, str key) -> bool',
-    'aten::__contains__.str_list(str[] l, str item) -> bool',
-    'aten::__getitem__.Dict_bool(Dict(bool, t) self, bool key) -> t(*)',
-    'aten::__getitem__.Dict_complex(Dict(complex, t) self, complex key) -> t(*)',
-    'aten::__getitem__.Dict_float(Dict(float, t) self, float key) -> t(*)',
-    'aten::__getitem__.Dict_int(Dict(int, t) self, int key) -> t(*)',
-    'aten::__getitem__.Dict_str(Dict(str, t) self, str key) -> t(*)',
-    'aten::__getitem__.Dict_Tensor(Dict(Tensor, t) self, Tensor key) -> t(*)',
-    'aten::__getitem__.str(str s, int index) -> str',
-    'aten::__getitem__.t(t[](a) list, int idx) -> t(*)',
-    'aten::__is__(t1 self, t2 obj) -> bool',
-    'aten::_native_batch_norm_legit(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)',
-    'aten::_native_batch_norm_legit.no_stats(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor)',
-    'aten::_native_batch_norm_legit.no_stats_out(Tensor input, Tensor? weight, Tensor? bias, bool training, float momentum, float eps, *, Tensor(a!) out, Tensor(b!) save_mean, Tensor(c!)
-    'aten::_native_batch_norm_legit.out(Tensor input, Tensor? weight, Tensor? bias, Tensor(a!) running_mean, Tensor(b!) running_var, bool training, float momentum, float eps, *, Tensor(d!) out, Tensor(e!) save_mean, Tensor(f!) save_invstd) -> (Tensor(d!), Tensor(e!), Tensor(f!))',
-    'aten::_native_batch_norm_legit_functional(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, bool training, float momentum, float eps) -> (Tensor, Tensor, Tensor, Tensor running_mean_out, Tensor running_var_out)',
-    'aten::_native_batch_norm_legit_no_training(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps) -> (Tensor, Tensor, Tensor)',
-    'aten::_native_batch_norm_legit_no_training.out(Tensor input, Tensor? weight, Tensor? bias, Tensor running_mean, Tensor running_var, float momentum, float eps, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))',
-    'aten::_native_multi_head_attention(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None) -> (Tensor, Tensor)',
-    'aten::_native_multi_head_attention.out(Tensor query, Tensor key, Tensor value, int embed_dim, int num_head, Tensor qkv_weight, Tensor qkv_bias, Tensor proj_weight, Tensor proj_bias, Tensor? mask=None, bool need_weights=True, bool average_attn_weights=True, int? mask_type=None, *, Tensor(a!) out0, Tensor(b!) out1) -> (Tensor(a!), Tensor(b!))',
-    'aten::_set_item.Tensor(Dict(Tensor, t)(a!) l, Tensor(b -> *) idx, t(c -> *) v) -> ()',
-    'aten::_set_item.bool(Dict(bool, t)(a!) l, bool(b -> *) idx, t(c -> *) v) -> ()',
-    'aten::_set_item.complex(Dict(complex, t)(a!) l, complex(b -> *) idx, t(c -> *) v) -> ()',
-    'aten::_set_item.float(Dict(float, t)(a!) l, float(b -> *) idx, t(c -> *) v) -> ()',
-    'aten::_set_item.int(Dict(int, t)(a!) l, int(b -> *) idx, t(c -> *) v) -> ()',
-    'aten::_set_item.str(Dict(str, t)(a!) l, str(b -> *) idx, t(c -> *) v) -> ()',
-    'aten::_set_item.t(t[](a!) l, int idx, t(b -> *) el) -> t[](a!)',
-    'aten::add(Scalar a, Scalar b) -> Scalar',
-    'aten::add.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor',
-    'aten::add.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor',
-    'aten::add.complex(complex a, complex b) -> complex',
-    'aten::add.complex_float(complex a, float b) -> complex',
-    'aten::add.complex_int(complex a, int b) -> complex',
-    'aten::add.float(float a, float b) -> float',
-    'aten::add.float_complex(float a, complex b) -> complex',
-    'aten::add.float_int(float a, int b) -> float',
-    'aten::add.int(int a, int b) -> int',
-    'aten::add.int_complex(int a, complex b) -> complex',
-    'aten::add.int_float(int a, float b) -> float',
-    'aten::add.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)',
-    'aten::add.str(str a, str b) -> str',
-    'aten::add.t(t[] a, t[] b) -> t[]',
-    'aten::add_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)',
-    'aten::add_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)',
-    'aten::add_.t(t[](a!) self, t[] b) -> t[]',
-    'aten::any.all_out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::any.bool(bool[] self) -> bool',
-    'aten::any.dim(Tensor self, int dim, bool keepdim=False) -> Tensor',
-    'aten::any.dimname_out(Tensor self, str dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::any.dimname(Tensor self, str dim, bool keepdim=False) -> Tensor',
-    'aten::any.dims_out(Tensor self, int[]? dim=None, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::any.dims(Tensor self, int[]? dim=None, bool keepdim=False) -> Tensor',
-    'aten::any.float(float[] self) -> bool',
-    'aten::any.int(int[] self) -> bool',
-    'aten::any.out(Tensor self, int dim, bool keepdim=False, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::any.str(str[] self) -> bool',
-    'aten::any(Tensor self) -> Tensor',
-    'aten::as_tensor.bool(bool t, *, ScalarType? dtype=None, Device? device=None) -> Tensor',
-    'aten::as_tensor.complex(complex t, *, ScalarType? dtype=None, Device? device=None) -> Tensor',
-    'aten::as_tensor.float(float t, *, ScalarType? dtype=None, Device? device=None) -> Tensor',
-    'aten::as_tensor.int(int t, *, ScalarType? dtype=None, Device? device=None) -> Tensor',
-    'aten::as_tensor.list(t[] data, *, ScalarType? dtype=None, Device? device=None) -> Tensor',
-    'aten::as_tensor(Tensor(a) data, *, ScalarType? dtype=None, Device? device=None) -> Tensor(b|a)',
-    'aten::bitwise_and.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::bitwise_and.Scalar_Tensor(Scalar self, Tensor other) -> Tensor',
-    'aten::bitwise_and.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::bitwise_and.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::bitwise_and.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::bitwise_and_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)',
-    'aten::bitwise_and_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)',
-    'aten::bitwise_left_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor',
-    'aten::bitwise_left_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::bitwise_left_shift.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::bitwise_left_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::bitwise_left_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::bitwise_left_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::bitwise_left_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)',
-    'aten::bitwise_left_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)',
-    'aten::bitwise_not(Tensor self) -> Tensor',
-    'aten::bitwise_not.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::bitwise_not_(Tensor(a!) self) -> Tensor(a!)',
-    'aten::bitwise_or.Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::bitwise_or.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::bitwise_or.Scalar_Tensor(Scalar self, Tensor other) -> Tensor',
-    'aten::bitwise_or.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::bitwise_or.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::bitwise_or.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::bitwise_or_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)',
-    'aten::bitwise_or_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)',
-    'aten::bitwise_right_shift.Scalar_Tensor(Scalar self, Tensor other) -> Tensor',
-    'aten::bitwise_right_shift.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::bitwise_right_shift.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::bitwise_right_shift.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::bitwise_right_shift.Tensor_Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::bitwise_right_shift.Tensor_Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::bitwise_right_shift_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)',
-    'aten::bitwise_right_shift_.Tensor_Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)',
-    'aten::bitwise_xor.Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::bitwise_xor.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::bitwise_xor.Scalar_Tensor(Scalar self, Tensor other) -> Tensor',
-    'aten::bitwise_xor.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::bitwise_xor.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::bitwise_xor.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::bitwise_xor_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)',
-    'aten::bitwise_xor_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)',
-    'aten::Bool.float(float a) -> bool',
-    'aten::Bool.int(int a) -> bool',
-    'aten::Bool.Tensor(Tensor a) -> bool',
-    'aten::ceil.float(float a) -> int',
-    'aten::ceil.int(int a) -> int',
-    'aten::ceil.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::ceil.Scalar(Scalar a) -> Scalar',
-    'aten::ceil(Tensor self) -> Tensor',
-    'aten::complex(Tensor real, Tensor imag) -> Tensor',
-    'aten::Complex.bool_bool(bool x, bool y) -> complex',
-    'aten::Complex.bool_float(bool x, float y) -> complex',
-    'aten::Complex.bool_int(bool x, int y) -> complex',
-    'aten::Complex.bool_Tensor(bool x, Tensor y) -> complex',
-    'aten::Complex.float_bool(float x, bool y) -> complex',
-    'aten::Complex.float_float(float x, float y) -> complex',
-    'aten::Complex.float_int(float x, int y) -> complex',
-    'aten::Complex.float_Tensor(float x, Tensor y) -> complex',
-    'aten::Complex.int_bool(int x, bool y) -> complex',
-    'aten::Complex.int_float(int x, float y) -> complex',
-    'aten::Complex.int_int(int x, int y) -> complex',
-    'aten::Complex.int_Tensor(int x, Tensor y) -> complex',
-    'aten::complex.out(Tensor real, Tensor imag, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::Complex.Scalar(Scalar a) -> complex',
-    'aten::Complex.Tensor_bool(Tensor x, bool y) -> complex',
-    'aten::Complex.Tensor_float(Tensor x, float y) -> complex',
-    'aten::Complex.Tensor_int(Tensor x, int y) -> complex',
-    'aten::Complex.Tensor_Tensor(Tensor a, Tensor b) -> complex',
-    'aten::ComplexImplicit(Tensor a) -> complex',
-    'aten::device(str a) -> Device',
-    'aten::device.with_index(str type, int index) -> Device',
-    'aten::dict.bool((bool, tVal)[] inputs) -> Dict(bool, tVal)',
-    'aten::dict.complex((complex, tVal)[] inputs) -> Dict(complex, tVal)',
-    'aten::dict.Dict_bool(Dict(bool, t)(a) self) -> Dict(bool, t)',
-    'aten::dict.Dict_complex(Dict(complex, t)(a) self) -> Dict(complex, t)',
-    'aten::dict.Dict_float(Dict(float, t)(a) self) -> Dict(float, t)',
-    'aten::dict.Dict_int(Dict(int, t)(a) self) -> Dict(int, t)',
-    'aten::dict.Dict_str(Dict(str, t)(a) self) -> Dict(str, t)',
-    'aten::dict.Dict_Tensor(Dict(Tensor, t)(a) self) -> Dict(Tensor, t)',
-    'aten::dict.float((float, tVal)[] inputs) -> Dict(float, tVal)',
-    'aten::dict.int((int, tVal)[] inputs) -> Dict(int, tVal)',
-    'aten::dict.str((str, tVal)[] inputs) -> Dict(str, tVal)',
-    'aten::dict.Tensor((Tensor, tVal)[] inputs) -> Dict(Tensor, tVal)',
-    'aten::dict() -> Dict(str, Tensor)',
-    'aten::div.complex(complex a, complex b) -> complex',
-    'aten::div.float(float a, float b) -> float',
-    'aten::div.int(int a, int b) -> float',
-    'aten::div.out_mode(Tensor self, Tensor other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)',
-    'aten::div.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::div.Scalar_mode_out(Tensor self, Scalar other, *, str? rounding_mode, Tensor(a!) out) -> Tensor(a!)',
-    'aten::div.Scalar_mode(Tensor self, Scalar other, *, str? rounding_mode) -> Tensor',
-    'aten::div.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::div.Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::div.Tensor_mode(Tensor self, Tensor other, *, str? rounding_mode) -> Tensor',
-    'aten::div.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::div(Scalar a, Scalar b) -> float',
-    'aten::eq_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)',
-    'aten::eq_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)',
-    'aten::eq.bool_list(bool[] a, bool[] b) -> bool',
-    'aten::eq.bool(bool a, bool b) -> bool',
-    'aten::eq.complex_float(complex a, float b) -> bool',
-    'aten::eq.complex(complex a, complex b) -> bool',
-    'aten::eq.device(Device a, Device b) -> bool',
-    'aten::eq.enum(AnyEnumType a, AnyEnumType b) -> bool',
-    'aten::eq.float_complex(float a, complex b) -> bool',
-    'aten::eq.float_int(float a, int b) -> bool',
-    'aten::eq.float_list(float[] a, float[] b) -> bool',
-    'aten::eq.float(float a, float b) -> bool',
-    'aten::eq.int_float(int a, float b) -> bool',
-    'aten::eq.int_list(int[] a, int[] b) -> bool',
-    'aten::eq.int(int a, int b) -> bool',
-    'aten::eq.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::eq.Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::eq.str_list(str[] a, str[] b) -> bool',
-    'aten::eq.str(str a, str b) -> bool',
-    'aten::eq.Tensor_list(Tensor[] a, Tensor[] b) -> bool',
-    'aten::eq.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::eq.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::eq(Scalar a, Scalar b) -> bool',
-    'aten::equal(Tensor self, Tensor other) -> bool',
-    'aten::extend.t(t[](a!) self, t[] other) -> ()',
-    'aten::Float.bool(bool a) -> float',
-    'aten::Float.int(int a) -> float',
-    'aten::Float.Scalar(Scalar a) -> float',
-    'aten::Float.str(str a) -> float',
-    'aten::Float.Tensor(Tensor a) -> float',
-    'aten::floor(Tensor self) -> Tensor',
-    'aten::floor.Scalar(Scalar a) -> Scalar',
-    'aten::floor.float(float a) -> int',
-    'aten::floor.int(int a) -> int',
-    'aten::floor.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::floor_(Tensor(a!) self) -> Tensor(a!)',
-    'aten::floor_divide(Tensor self, Tensor other) -> Tensor',
-    'aten::floor_divide.Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::floor_divide.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::floor_divide.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::floor_divide_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)',
-    'aten::floor_divide_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)',
-    'aten::floordiv(Scalar a, Scalar b) -> Scalar',
-    'aten::floordiv.float(float a, float b) -> float',
-    'aten::floordiv.float_int(float a, int b) -> float',
-    'aten::floordiv.int(int a, int b) -> int',
-    'aten::floordiv.int_float(int a, float b) -> float',
-    'aten::fmax(Tensor self, Tensor other) -> Tensor',
-    'aten::fmax.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::fmin(Tensor self, Tensor other) -> Tensor',
-    'aten::fmin.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::fmod(Scalar a, Scalar b) -> float',
-    'aten::fmod.Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::fmod.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::fmod.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::fmod.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::fmod.float(float a, float b) -> float',
-    'aten::fmod.float_int(float a, int b) -> float',
-    'aten::fmod.int(int a, int b) -> float',
-    'aten::fmod.int_float(int a, float b) -> float',
-    'aten::fmod_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)',
-    'aten::fmod_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)',
-    'aten::get.bool(Dict(bool, t) self, bool key) -> t(*)?',
-    'aten::get.complex(Dict(complex, t) self, complex key) -> t(*)?',
-    'aten::get.default_bool(Dict(bool, t) self, bool key, t default_value) -> t(*)',
-    'aten::get.default_complex(Dict(complex, t) self, complex key, t default_value) -> t(*)',
-    'aten::get.default_float(Dict(float, t) self, float key, t default_value) -> t(*)',
-    'aten::get.default_int(Dict(int, t) self, int key, t default_value) -> t(*)',
-    'aten::get.default_str(Dict(str, t) self, str key, t default_value) -> t(*)',
-    'aten::get.default_Tensor(Dict(Tensor, t) self, Tensor key, t default_value) -> t(*)',
-    'aten::get.float(Dict(float, t) self, float key) -> t(*)?',
-    'aten::get.int(Dict(int, t) self, int key) -> t(*)?',
-    'aten::get.str(Dict(str, t) self, str key) -> t(*)?',
-    'aten::get.Tensor(Dict(Tensor, t) self, Tensor key) -> t(*)?',
-    'aten::gt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)',
-    'aten::gt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)',
-    'aten::gt.float_int(float a, int b) -> bool',
-    'aten::gt.float(float a, float b) -> bool',
-    'aten::gt.int_float(int a, float b) -> bool',
-    'aten::gt.int(int a, int b) -> bool',
-    'aten::gt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::gt.Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::gt.str(str a, str b) -> bool',
-    'aten::gt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::gt.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::gt(Scalar a, Scalar b) -> bool',
-    'aten::Int.bool(bool a) -> int',
-    'aten::Int.float(float a) -> int',
-    'aten::Int.Scalar(Scalar a) -> int',
-    'aten::Int.str(str a) -> int',
-    'aten::Int.Tensor(Tensor a) -> int',
-    'aten::int_repr(Tensor self) -> Tensor',
-    'aten::int_repr.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::IntImplicit(Tensor a) -> int',
-    'aten::inverse(Tensor self) -> Tensor',
-    'aten::inverse.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::item(Tensor self) -> Scalar',
-    'aten::items.bool(Dict(bool, t) self) -> ((bool, t)[])',
-    'aten::items.complex(Dict(complex, t) self) -> ((complex, t)[])',
-    'aten::items.float(Dict(float, t) self) -> ((float, t)[])',
-    'aten::items.int(Dict(int, t) self) -> ((int, t)[])',
-    'aten::items.str(Dict(str, t) self) -> ((str, t)[])',
-    'aten::items.Tensor(Dict(Tensor, t) self) -> ((Tensor, t)[])',
-    'aten::keys.bool(Dict(bool, t) self) -> bool[](*)',
-    'aten::keys.complex(Dict(complex, t) self) -> complex[](*)',
-    'aten::keys.float(Dict(float, t) self) -> float[](*)',
-    'aten::keys.int(Dict(int, t) self) -> int[](*)',
-    'aten::keys.str(Dict(str, t) self) -> str[](*)',
-    'aten::keys.Tensor(Dict(Tensor, t) self) -> Tensor[](*)',
-    'aten::le_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)',
-    'aten::le_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)',
-    'aten::le.float_int(float a, int b) -> bool',
-    'aten::le.float(float a, float b) -> bool',
-    'aten::le.int_float(int a, float b) -> bool',
-    'aten::le.int(int a, int b) -> bool',
-    'aten::le.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::le.Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::le.str(str a, str b) -> bool',
-    'aten::le.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::le.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::le(Scalar a, Scalar b) -> bool',
-    'aten::leaky_relu(Tensor self, Scalar negative_slope=0.01) -> Tensor',
-    'aten::leaky_relu.out(Tensor self, Scalar negative_slope=0.01, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::leaky_relu_(Tensor(a!) self, Scalar negative_slope=0.01) -> Tensor(a!)',
-    'aten::leaky_relu_backward(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result) -> Tensor',
-    'aten::leaky_relu_backward.grad_input(Tensor grad_output, Tensor self, Scalar negative_slope, bool self_is_result, *, Tensor(a!) grad_input) -> Tensor(a!)',
-    'aten::len.any(Any[] a) -> int',
-    'aten::len.Dict_bool(Dict(bool, t) self) -> int',
-    'aten::len.Dict_complex(Dict(complex, t) self) -> int',
-    'aten::len.Dict_float(Dict(float, t) self) -> int',
-    'aten::len.Dict_int(Dict(int, t) self) -> int',
-    'aten::len.Dict_str(Dict(str, t) self) -> int',
-    'aten::len.Dict_Tensor(Dict(Tensor, t) self) -> int',
-    'aten::len.str(str s) -> int',
-    'aten::len.t(t[] a) -> int',
-    'aten::len.Tensor(Tensor t) -> int',
-    'aten::lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor',
-    'aten::lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor',
-    'aten::lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)',
-    'aten::lerp_.Tensor(Tensor(a!) self, Tensor end, Tensor weight) -> Tensor(a!)',
-    'aten::less.Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::less.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)',
-    'aten::less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)',
-    'aten::less_equal.Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::less_equal.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::less_equal.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::less_equal.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::less_equal_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)',
-    'aten::less_equal_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)',
-    'aten::log10.complex(complex a) -> complex',
-    'aten::log10.float(float a) -> float',
-    'aten::log10.int(int a) -> float',
-    'aten::log10.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::log10.Scalar(Scalar a) -> Scalar',
-    'aten::log10(Tensor self) -> Tensor',
-    'aten::lt_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)',
-    'aten::lt_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)',
-    'aten::lt.float_int(float a, int b) -> bool',
-    'aten::lt.float(float a, float b) -> bool',
-    'aten::lt.int_float(int a, float b) -> bool',
-    'aten::lt.int(int a, int b) -> bool',
-    'aten::lt.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::lt.Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::lt.str(str a, str b) -> bool',
-    'aten::lt.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::lt.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::lt(Scalar a, Scalar b) -> bool',
-    'aten::mul(Scalar a, Scalar b) -> Scalar',
-    'aten::mul.Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::mul.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::mul.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::mul.complex(complex a, complex b) -> complex',
-    'aten::mul.complex_float(complex a, float b) -> complex',
-    'aten::mul.complex_int(complex a, int b) -> complex',
-    'aten::mul.float(float a, float b) -> float',
-    'aten::mul.float_complex(float a, complex b) -> complex',
-    'aten::mul.float_int(float a, int b) -> float',
-    'aten::mul.int(int a, int b) -> int',
-    'aten::mul.int_complex(int a, complex b) -> complex',
-    'aten::mul.int_float(int a, float b) -> float',
-    'aten::mul.left_t(t[] l, int n) -> t[]',
-    'aten::mul.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::mul.right_(int n, t[] l) -> t[]',
-    'aten::mul_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)',
-    'aten::mul_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)',
-    'aten::mul_.t(t[](a!) l, int n) -> t[](a!)',
-    'aten::ne(Scalar a, Scalar b) -> bool',
-    'aten::ne.Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::ne.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::ne.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::ne.Tensor_list(Tensor[] a, Tensor[] b) -> bool',
-    'aten::ne.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::ne.bool(bool a, bool b) -> bool',
-    'aten::ne.bool_list(bool[] a, bool[] b) -> bool',
-    'aten::ne.complex(complex a, complex b) -> bool',
-    'aten::ne.complex_float(complex a, float b) -> bool',
-    'aten::ne.device(Device a, Device b) -> bool',
-    'aten::ne.enum(AnyEnumType a, AnyEnumType b) -> bool',
-    'aten::ne.float(float a, float b) -> bool',
-    'aten::ne.float_complex(float a, complex b) -> bool',
-    'aten::ne.float_int(float a, int b) -> bool',
-    'aten::ne.float_list(float[] a, float[] b) -> bool',
-    'aten::ne.int(int a, int b) -> bool',
-    'aten::ne.int_float(int a, float b) -> bool',
-    'aten::ne.int_list(int[] a, int[] b) -> bool',
-    'aten::ne.str(str a, str b) -> bool',
-    'aten::ne.str_list(str[] a, str[] b) -> bool',
-    'aten::ne_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)',
-    'aten::ne_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)',
-    'aten::neg(Tensor self) -> Tensor',
-    'aten::neg.Scalar(Scalar a) -> Scalar',
-    'aten::neg.complex(complex a) -> complex',
-    'aten::neg.float(float a) -> float',
-    'aten::neg.int(int a) -> int',
-    'aten::neg.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::neg_(Tensor(a!) self) -> Tensor(a!)',
-    'aten::negative(Tensor self) -> Tensor',
-    'aten::negative.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::negative_(Tensor(a!) self) -> Tensor(a!)',
-    'aten::pow.complex(complex a, complex b) -> complex',
-    'aten::pow.complex_float(complex a, float b) -> complex',
-    'aten::pow.float(float a, float b) -> float',
-    'aten::pow.float_complex(float a, complex b) -> complex',
-    'aten::pow.float_int(float a, int b) -> float',
-    'aten::pow.int(int a, int b) -> float',
-    'aten::pow.int_float(int a, float b) -> float',
-    'aten::pow.int_to_int(int a, int b) -> int',
-    'aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor',
-    'aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::pow.Scalar_Scalar(Scalar a, Scalar b) -> float',
-    'aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor',
-    'aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor',
-    'aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)',
-    'aten::pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)',
-    'aten::remainder.float_int(float a, int b) -> float',
-    'aten::remainder.float(float a, float b) -> float',
-    'aten::remainder.int_float(int a, float b) -> float',
-    'aten::remainder.int(int a, int b) -> int',
-    'aten::remainder.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::remainder.Scalar_Tensor_out(Scalar self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::remainder.Scalar_Tensor(Scalar self, Tensor other) -> Tensor',
-    'aten::remainder.Scalar(Tensor self, Scalar other) -> Tensor',
-    'aten::remainder.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::remainder.Tensor(Tensor self, Tensor other) -> Tensor',
-    'aten::remainder(Scalar a, Scalar b) -> Scalar',
-    'aten::replace(str self, str old, str new, int max=-1) -> str',
-    'aten::ScalarImplicit(Tensor a) -> Scalar',
-    'aten::searchsorted.Scalar_out(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)',
-    'aten::searchsorted.Scalar(Tensor sorted_sequence, Scalar self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor',
-    'aten::searchsorted.Tensor_out(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None, Tensor(a!) out) -> Tensor(a!)',
-    'aten::searchsorted.Tensor(Tensor sorted_sequence, Tensor self, *, bool out_int32=False, bool right=False, str? side=None, Tensor? sorter=None) -> Tensor',
-    'aten::sqrt.complex(complex a) -> complex',
-    'aten::sqrt.float(float a) -> float',
-    'aten::sqrt.int(int a) -> float',
-    'aten::sqrt.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::sqrt.Scalar(Scalar a) -> Scalar',
-    'aten::sqrt(Tensor self) -> Tensor',
-    'aten::str(t elem) -> str',
-    'aten::sub(Scalar a, Scalar b) -> Scalar',
-    'aten::sub.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor',
-    'aten::sub.Scalar_out(Tensor self, Scalar other, Scalar alpha=1, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::sub.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor',
-    'aten::sub.complex(complex a, complex b) -> complex',
-    'aten::sub.complex_float(complex a, float b) -> complex',
-    'aten::sub.complex_int(complex a, int b) -> complex',
-    'aten::sub.float(float a, float b) -> float',
-    'aten::sub.float_complex(float a, complex b) -> complex',
-    'aten::sub.float_int(float a, int b) -> float',
-    'aten::sub.int(int a, int b) -> int',
-    'aten::sub.int_complex(int a, complex b) -> complex',
-    'aten::sub.int_float(int a, float b) -> float',
-    'aten::sub.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)',
-    'aten::sub_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)',
-    'aten::sub_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)',
-    'aten::subtract.Scalar(Tensor self, Scalar other, Scalar alpha=1) -> Tensor',
-    'aten::subtract.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor',
-    'aten::subtract.out(Tensor self, Tensor other, *, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)',
-    'aten::subtract_.Scalar(Tensor(a!) self, Scalar other, Scalar alpha=1) -> Tensor(a!)',
-    'aten::subtract_.Tensor(Tensor(a!) self, Tensor other, *, Scalar alpha=1) -> Tensor(a!)',
-    'aten::sum(Tensor self, *, ScalarType? dtype=None) -> Tensor',
-    'aten::sum.DimnameList_out(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)',
-    'aten::sum.IntList_out(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)',
-    'aten::sum.bool(bool[] self) -> int',
-    'aten::sum.complex(complex[] self) -> complex',
-    'aten::sum.dim_DimnameList(Tensor self, str[1] dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor',
-    'aten::sum.dim_IntList(Tensor self, int[1]? dim, bool keepdim=False, *, ScalarType? dtype=None) -> Tensor',
-    'aten::sum.float(float[] self) -> float',
-    'aten::sum.int(int[] self) -> int',
-    'aten::sum.out(Tensor self, *, ScalarType? dtype=None, Tensor(a!) out) -> Tensor(a!)',
-    'aten::sum_to_size(Tensor self, SymInt[] size) -> Tensor',
-    'aten::tensor(t[] data, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor',
-    'aten::tensor.bool(bool t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor',
-    'aten::tensor.complex(complex t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor',
-    'aten::tensor.float(float t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor',
-    'aten::tensor.int(int t, *, ScalarType? dtype=None, Device? device=None, bool requires_grad=False) -> Tensor',
-    'aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[]',
-    'aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[]',
-    'aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[]',
-    'aten::tensordot(Tensor self, Tensor other, int[] dims_self, int[] dims_other) -> Tensor',
-    'aten::tensordot.out(Tensor self, Tensor other, int[] dims_self, int[] dims_other, *, Tensor(a!) out) -> Tensor(a!)',
-    'aten::values.bool(Dict(bool, t) self) -> t[](*)',
-    'aten::values.complex(Dict(complex, t) self) -> t[](*)',
-    'aten::values.float(Dict(float, t) self) -> t[](*)',
-    'aten::values.int(Dict(int, t) self) -> t[](*)',
-    'aten::values.str(Dict(str, t) self) -> t[](*)',
-    'aten::values.Tensor(Dict(Tensor, t) self) -> t[](*)',
-    'aten::values(Tensor(a) self) -> Tensor(a)',
-    'aten::warn(str message, int stacklevel=2) -> ()',
-    'prim::abs.complex(complex a) -> float',
-    'prim::abs.float(float a) -> float',
-    'prim::abs.int(int a) -> int',
-    'prim::abs.Scalar(Scalar a) -> Scalar',
-    'prim::abs(Tensor x) -> Tensor',
-    'prim::device(Tensor a) -> Device',
-    'prim::is_cpu(Tensor a) -> bool',
-    'prim::is_cuda(Tensor a) -> bool',
-    'prim::is_ipu(Tensor a) -> bool',
-    'prim::is_maia(Tensor a) -> bool',
-    'prim::is_meta(Tensor a) -> bool',
-    'prim::is_mkldnn(Tensor a) -> bool',
-    'prim::is_mps(Tensor a) -> bool',
-    'prim::is_mtia(Tensor a) -> bool',
-    'prim::is_nested(Tensor a) -> bool',
-    'prim::is_quantized(Tensor a) -> bool',
-    'prim::is_sparse_csr(Tensor a) -> bool',
-    'prim::is_sparse(Tensor a) -> bool',
-    'prim::is_vulkan(Tensor a) -> bool',
-    'prim::is_xla(Tensor a) -> bool',
-    'prim::is_xpu(Tensor a) -> bool',
-    'prim::itemsize(Tensor a) -> int',
-    'prim::layout(Tensor a) -> Layout',
-    'prim::max.bool_list(bool[] l, bool[] r) -> bool[]',
-    'prim::max.float_int(float a, int b) -> float',
-    'prim::max.float_list(float[] l, float[] r) -> float[]',
-    'prim::max.float(float a, float b) -> float',
-    'prim::max.int_float(int a, float b) -> float',
-    'prim::max.int_list(int[] l, int[] r) -> int[]',
-    'prim::max.int(int a, int b) -> int',
-    'prim::max.self_bool(bool[] self) -> bool',
-    'prim::max.self_float(float[] self) -> float',
-    'prim::max.self_int(int[] self) -> int',
-    'prim::max(Scalar a, Scalar b) -> Scalar',
-    'prim::min.bool_list(bool[] l, bool[] r) -> bool[]',
-    'prim::min.float_int(float a, int b) -> float',
-    'prim::min.float_list(float[] l, float[] r) -> float[]',
-    'prim::min.float(float a, float b) -> float',
-    'prim::min.int_float(int a, float b) -> float',
-    'prim::min.int_list(int[] l, int[] r) -> int[]',
-    'prim::min.int(int a, int b) -> int',
-    'prim::min.self_bool(bool[] self) -> bool',
-    'prim::min.self_float(float[] self) -> float',
-    'prim::min.self_int(int[] self) -> int',
-    'prim::min(Scalar a, Scalar b) -> Scalar',
-]
 known_legacy_schema_definitions = [
     '_caffe2::BBoxTransform(Tensor rois, Tensor deltas, Tensor im_info, float[] weights, bool apply_scale, bool rotated, bool angle_bound_on, int angle_bound_lo, int angle_bound_hi, float clip_angle_thresh, bool legacy_plus_one) -> (Tensor output_0, Tensor output_1)',
@@ -994,67 +101,40 @@ def _parse_schemas():
         content = '\n'.join(content)
         for value in entry[1].findall(content):
             value = re.sub(r'\n|\r|\s*"', '', value) if value.startswith('_caffe2::') else value
-            definition = entry[2] + value if len(entry) > 2 else value
-            if not definition in definitions:
-                definitions.add(definition)
-                schema = Schema(definition)
-                if schema.name in schemas:
-                    raise KeyError(schema.name)
-                schemas[schema.name] = schema
-    for value in known_legacy_schema_definitions:
-        schema = Schema(value)
-        schemas[schema.name] = schema
-    for value in known_schema_definitions:
-        schema = Schema(value)
-        schemas[schema.name] = schema
+            schema = entry[2] + value if len(entry) > 2 else value
+            if schema not in definitions:
+                definitions.add(schema)
+                key = schema.split('(', 1)[0].strip()
+                if key in schemas:
+                    raise KeyError(key)
+                schemas[key] = schema
+    for schema in known_legacy_schema_definitions:
+        key = schema.split('(', 1)[0].strip()
+        schemas[key] = schema
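+    # Ask the installed PyTorch runtime for every registered operator schema;
+    # this replaces the hand-maintained known_schema_definitions list removed above.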
+    import torch # pylint: disable=import-outside-toplevel,import-error
+    all_schemas = list(torch._C._jit_get_all_schemas()) # pylint: disable=protected-access
+    for schema in all_schemas:
+        definition = str(schema)
+        key = definition.split('(', 1)[0].strip()
+        schemas[key] = definition
     return schemas
 
 def _filter_schemas(schemas, types):
-    keys = set(map(lambda _: _.split('.')[0], types.keys()))
+    names = set(map(lambda _: _.split('.')[0], types.keys()))
     filtered_schemas = set()
     for schema in schemas.values():
-        for key in keys:
-            if schema.name == key or schema.name.startswith(key + '.'):
-                filtered_schemas.add(schema.name)
-    # for schema in schemas.values():
-    #     if schema.name.startswith('aten::pop'):
-    #         filtered_schemas.add(schema.name)
-    # filtered_schemas = set(types.keys())
-    # content = _read('list.csv')
-    # regex = re.compile(r'Unsupported function \'(.*)\' in', re.MULTILINE)
-    # matches = set()
-    # for match in regex.findall(content):
-    #     if match.startswith('torch.'):
-    #         matches.add('aten::' + match[6:])
-    #     if match.startswith('ops.') and len(match.split('.')) > 2:
-    #         matches.add(match[4:].replace('.', '::'))
-    # for schema in schemas.values():
-    #     for match in matches:
-    #         if schema.name.startswith(match):
-    #             filtered_schemas.add(schema.name)
+        key = schema.split('(', 1)[0].strip()
+        for name in names:
+            if key == name or key.startswith(name + '.'):
+                filtered_schemas.add(key)
     return dict(filter(lambda _: _[0] in filtered_schemas, schemas.items()))
 
-def _check_schemas(schemas): # pylint: disable=unused-argument
-    # import torch
-    # for name in dir(torch.ops.aten):
-    #     if name.startswith('__') or name == 'name':
-    #         continue
-    #     packet = getattr(torch.ops.aten, name)
-    #     for overload in packet.overloads():
-    #         key = 'aten::' + name + ('.' + overload if overload != 'default' else '')
-    #         overload_schema = str(getattr(packet, overload)._schema)
-    #         if key in schemas:
-    #             schema = schemas[key]
-    #             if overload_schema != str(schema):
-    #                 print(overload_schema)
-    #                 print(schema)
-    pass
-
 def _check_types(types, schemas):
     types = dict(types.items())
     for schema in schemas.values():
-        if schema.name in types:
-            types.pop(schema.name)
+        key = schema.split('(', 1)[0].strip()
+        if key in types:
+            types.pop(key)
     for key in list(types.keys()):
         if key.startswith('torch.nn') or key.startswith('__torch__.'):
             types.pop(key)
@@ -1076,12 +156,13 @@ def _metadata():
     types = _read_metadata()
     schemas = _parse_schemas()
     _check_types(types, schemas)
-    _check_schemas(schemas)
     filtered_schemas = _filter_schemas(schemas, types)
-    metadata = Metadata(types)
     for schema in filtered_schemas.values():
-        value = metadata.type(schema)
-        value['name'] = schema.value
+        key = schema.split('(', 1)[0].strip()
+        if key in types:
+            types[key]['name'] = schema
+        else:
+            types[key] = { 'name': schema }
     _write_metadata(types)
 
 def main(): # pylint: disable=missing-function-docstring
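For orientation, the convention the rewrite settles on can be exercised in isolation: every schema string is keyed by the text before its first parenthesis (the operator name plus optional overload), and the operator inventory comes from the running PyTorch build instead of a hand-maintained list. A minimal sketch of that convention, assuming a local PyTorch install; torch._C._jit_get_all_schemas is a private API whose output may differ between releases:

    import torch

    # Build the same string-keyed registry the patched _parse_schemas produces.
    schemas = {}
    for schema in torch._C._jit_get_all_schemas():
        definition = str(schema)  # 'aten::add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor'
        key = definition.split('(', 1)[0].strip()  # 'aten::add.Tensor'
        schemas[key] = definition

    print(schemas['aten::add.Tensor'])

Matching on the pre-parenthesis key is what lets _filter_schemas and _check_types compare schemas against metadata entries with plain string operations instead of the parsed Schema objects the diff removes.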